コード例 #1
0
    def __init__(self, scope: core.Stack, id: str, **kwargs):
        """Frontend Fargate service fronted by a public ALB.

        Builds on the shared BasePlatform (cluster, service-discovery
        namespace, shared backend security group) and allows the frontend
        tasks to reach the backend services on TCP 3000.
        """
        super().__init__(scope, id, **kwargs)

        self.base_platform = BasePlatform(self, self.stack_name)

        # Backend endpoints resolve through the Cloud Map namespace.
        backend_urls = {
            "CRYSTAL_URL": "http://ecsdemo-crystal.service:3000/crystal",
            "NODEJS_URL": "http://ecsdemo-nodejs.service:3000",
        }
        self.fargate_task_image = aws_ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
            container_port=3000,
            image=aws_ecs.ContainerImage.from_registry("brentley/ecsdemo-frontend"),
            environment=backend_urls,
        )

        self.fargate_load_balanced_service = aws_ecs_patterns.ApplicationLoadBalancedFargateService(
            self,
            "FrontendFargateLBService",
            cluster=self.base_platform.ecs_cluster,
            cloud_map_options=self.base_platform.sd_namespace,
            task_image_options=self.fargate_task_image,
            cpu=256,
            memory_limit_mib=512,
            desired_count=1,
            public_load_balancer=True)

        # Open TCP 3000 from the frontend tasks toward the shared backend SG.
        frontend_to_backend = aws_ec2.Port(protocol=aws_ec2.Protocol.TCP,
                                           string_representation="frontendtobackend",
                                           from_port=3000,
                                           to_port=3000)
        self.fargate_load_balanced_service.service.connections.allow_to(
            self.base_platform.services_sec_grp,
            port_range=frontend_to_backend)
コード例 #2
0
    def __init__(self, scope: core.Construct, id: str,
                 e2e_security_group: aws_ec2.SecurityGroup, vpc: aws_ec2.Vpc,
                 **kwargs) -> None:
        """Provision a minimal MSK (Kafka) cluster in the given VPC.

        Only the supplied e2e security group may reach the brokers on the
        default Kafka port. The cluster ARN is exported as a stack output.
        """
        super().__init__(scope, id, **kwargs)

        # Brokers accept Kafka traffic solely from the e2e test group.
        broker_sg = aws_ec2.SecurityGroup(self,
                                          'msk-cluster-sec-group',
                                          vpc=vpc)
        kafka_port = aws_ec2.Port(string_representation='kafka',
                                  protocol=aws_ec2.Protocol.TCP,
                                  from_port=DEFAULT_KAFKA_PORT,
                                  to_port=DEFAULT_KAFKA_PORT)
        broker_sg.add_ingress_rule(peer=e2e_security_group,
                                   connection=kafka_port)

        # One broker per private subnet — the minimum MSK accepts.
        broker_info = aws_msk.CfnCluster.BrokerNodeGroupInfoProperty(
            instance_type="kafka.m5.large",
            client_subnets=[
                subnet.subnet_id for subnet in vpc.private_subnets
            ],
            security_groups=[broker_sg.security_group_id])
        msk_cluster = aws_msk.CfnCluster(
            self,
            'msk-cluster',
            cluster_name='cdk-test',
            kafka_version='2.3.1',
            number_of_broker_nodes=len(vpc.private_subnets),
            broker_node_group_info=broker_info)

        core.CfnOutput(self, "arn", value=msk_cluster.ref)
コード例 #3
0
ファイル: app.py プロジェクト: meerutech/omni-repo
    def __init__(self, scope: core.Stack, id: str, **kwargs) -> None:
        """Fargate service behind an internal NLB, exposed via API Gateway VPC Link.

        Looks up an existing VPC, builds the local Dockerfile into an ECR
        image, runs it as a Fargate service behind an internal NLB, and
        attaches that NLB to an existing REST API through a VPC Link.
        """
        super().__init__(scope, id, **kwargs)

        # Use the pre-existing VPC created by the api-gateway stack.
        self.vpc = ec2.Vpc.from_lookup(self, "VPC", vpc_name='api-gateway/VPC')

        # Create ECS Cluster
        self.ecs_cluster = ecs.Cluster(self, "ECSCluster", vpc=self.vpc)

        # This high level construct will build a docker image, ecr repo and connect the ecs service to allow pull access
        self.container_image = ecr.DockerImageAsset(self,
                                                    "Image",
                                                    directory="./")

        # Task definition details to define the frontend service container
        self.task_def = ecs_patterns.NetworkLoadBalancedTaskImageOptions(
            image=ecs.ContainerImage.from_ecr_repository(
                repository=self.container_image.repository),
            container_port=80,
            enable_logging=True,
            environment={"GIT_HASH": "12345"},
        )

        # Create the frontend service (internal NLB only)
        self.python_service = ecs_patterns.NetworkLoadBalancedFargateService(
            self,
            "PythonService",
            cpu=256,
            memory_limit_mib=512,
            cluster=self.ecs_cluster,
            desired_count=1,
            task_image_options=self.task_def,
            public_load_balancer=False,
        )

        # NLBs preserve the caller's source IP and have no security group of
        # their own, so the task SG must admit the traffic directly.
        # FIX: the original rule used ec2.Protocol.ALL, which ignores the
        # port arguments and opened every port/protocol to any IPv4 source,
        # contradicting the stated intent. Restrict the rule to TCP 80.
        self.python_service.service.connections.allow_from_any_ipv4(
            port_range=ec2.Port(
                protocol=ec2.Protocol.TCP,
                string_representation="All port 80",
                from_port=80,
                to_port=80,
            ),
            description="Allows traffic on port 80 from NLB")

        # Create VPC Link from API Gateway to NLB
        # TODO: Make api id dynamic
        self.rest_api = apigw.RestApi.from_rest_api_id(
            self, "APIGateway", rest_api_id="6znhu1vqp6")

        # TODO: Create stage variable for vpc links
        self.gateway_vpc_link = apigw.VpcLink(
            self,
            "VPCLink",
            description="VPC Link from API Gateway to ECS Python Service",
            targets=[self.python_service.load_balancer],
            vpc_link_name="ECS_VPC_LINK")
コード例 #4
0
ファイル: rds_contstruct.py プロジェクト: twistedFantasy/aws
    def __init__(self, scope: core.Construct, id: str, *, app_env: str,
                 vpc: ec2.Vpc, **kwargs) -> None:
        """Environment-scoped RDS Postgres instance with SSM-sourced credentials.

        Database name, user and password are read from SSM Parameter Store
        under /{app_env}/test/. Access is limited to one fixed office IP.
        """
        super().__init__(scope, id, **kwargs)

        sg = ec2.SecurityGroup(
            self,
            'security-group',
            security_group_name=f'rds-{app_env}-test-security-group',
            description=f'security group for rds test({app_env})',
            vpc=vpc)
        # Postgres (5432) only from the fixed office address.
        sg.add_ingress_rule(
            peer=ec2.Peer.ipv4("54.125.156.2/32"),
            connection=ec2.Port(string_representation='random office',
                                protocol=ec2.Protocol.TCP,
                                from_port=5432,
                                to_port=5432))

        def _ssm(name: str) -> str:
            # DB settings live in SSM Parameter Store, one tree per env.
            return Param.value_for_string_parameter(
                self, f'/{app_env}/test/{name}')

        password = _ssm('DATABASE_PASSWORD')
        self._rds = rds.DatabaseInstance(
            self,
            'rds',
            engine=rds.DatabaseInstanceEngine.POSTGRES,
            engine_version='11.5',
            database_name=_ssm('DATABASE_NAME'),
            master_username=_ssm('DATABASE_USER'),
            master_user_password=core.SecretValue.plain_text(password),
            instance_class=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE3,
                                               ec2.InstanceSize.MICRO),
            instance_identifier=f'{app_env}-test',
            backup_retention=core.Duration.days(7),
            delete_automated_backups=True,
            security_groups=[sg],
            storage_type=rds.StorageType.GP2,
            allocated_storage=20,
            vpc=vpc)
コード例 #5
0
ファイル: app.py プロジェクト: helecloud/redshift-query
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Redshift test cluster plus the redshift-query Glue-job SAM app.

        The SAM application's exported security group is granted ingress to
        the cluster on the Redshift port, and the app is made dependent on
        the cluster so it deploys second.
        """
        super().__init__(scope, id, **kwargs)

        # Cheapest possible NAT: a t3a.nano instance instead of a NAT gateway.
        vpc = aws_ec2.Vpc(
            self,
            'VPC',
            max_azs=1,
            nat_gateway_provider=aws_ec2.NatProvider.instance(
                instance_type=aws_ec2.InstanceType('t3a.nano')))

        redshift_sg = aws_ec2.SecurityGroup(
            self,
            'ClusterSecurityGroup',
            vpc=vpc,
            allow_all_outbound=True,
            description="Allow Glue Job to access redshift",
            security_group_name="serverless-redshift-query-testing-redshift")

        cluster = aws_redshift.Cluster(
            self,
            'Cluster',
            cluster_name='serverless-redshift-query-testing',
            master_user=aws_redshift.Login(master_username='******'),
            vpc=vpc,
            removal_policy=core.RemovalPolicy.DESTROY,
            security_groups=[redshift_sg])

        # Deploy the published Glue-job SAM template that runs the SQL.
        sam_app = aws_sam.CfnApplication(
            self,
            'RedshiftQueryGlueJob',
            location=
            'https://redshift-query.s3-eu-west-1.amazonaws.com/glue-job-template.yaml',
            parameters={
                'ClusterId': 'serverless-redshift-query-testing',
                'SQLStatements': "select 1;",
                'Loglevel': "ERROR"
            })

        # The SAM app exports its job's security group; let it reach Redshift.
        glue_sg = aws_ec2.SecurityGroup.from_security_group_id(
            self, 'GlueSecurityGroup',
            sam_app.get_att('Outputs.SecurityGroup').to_string())
        redshift_sg.add_ingress_rule(
            peer=glue_sg,
            connection=aws_ec2.Port(protocol=aws_ec2.Protocol.TCP,
                                    string_representation='Redshift Port',
                                    from_port=5439,
                                    to_port=5439))

        # The job needs the cluster to exist before it can run.
        sam_app.node.add_dependency(cluster)
コード例 #6
0
    def provision(
        self,
        name: str,
        eks_version: eks.KubernetesVersion,
        private_api: bool,
        vpc: ec2.Vpc,
        bastion_sg: ec2.SecurityGroup,
    ):
        """Create an EKS cluster and return it.

        Optionally restricts the API endpoint to private access, grants the
        bastion security group HTTPS access to the control plane, and emits
        stack outputs for the cluster name and a kubeconfig command.
        """
        control_plane_sg = ec2.SecurityGroup(
            self.scope,
            "EKSSG",
            security_group_name=f"{name}-EKSSG",
            vpc=vpc,
            allow_all_outbound=False,
        )

        # Private-only endpoint when requested; otherwise the EKS default.
        endpoint = eks.EndpointAccess.PRIVATE if private_api else None

        # Note: We can't tag the EKS cluster via CDK/CF: https://github.com/aws/aws-cdk/issues/4995
        cluster = eks.Cluster(
            self.scope,
            "eks",
            cluster_name=name,
            version=eks_version,
            vpc=vpc,
            vpc_subnets=[ec2.SubnetType.PRIVATE],
            endpoint_access=endpoint,
            default_capacity=0,
            security_group=control_plane_sg,
        )

        if bastion_sg:
            # Let the bastion reach the Kubernetes API over HTTPS.
            api_port = ec2.Port(
                protocol=ec2.Protocol("TCP"),
                string_representation="API Access",
                from_port=443,
                to_port=443,
            )
            cluster.cluster_security_group.add_ingress_rule(
                peer=bastion_sg, connection=api_port)

        cdk.CfnOutput(self.scope,
                      "eks_cluster_name",
                      value=cluster.cluster_name)
        cdk.CfnOutput(
            self.scope,
            "eks_kubeconfig_cmd",
            value=
            f"aws eks update-kubeconfig --name {cluster.cluster_name} --region {self.scope.region} --role-arn {cluster.kubectl_role.role_arn}",
        )

        return cluster
    def add_security_group_rule(self,
                                sg_id: str,
                                protocol: _ec2.Protocol,
                                cidr_range: str = None,
                                prefix_list: str = None,
                                from_port: int = 0,
                                to_port: int = 0,
                                is_ingress: bool = True,
                                description: str = None):
        """Add an ingress or egress rule to a tracked security group.

        The peer is a prefix list when ``prefix_list`` is given, otherwise
        the supplied CIDR (defaulting to the VPC CIDR). ``to_port`` defaults
        to ``from_port`` when only the latter is set.
        """
        if cidr_range is None:
            cidr_range = self._vpc.vpc_cidr_block

        if from_port != 0 and to_port == 0:
            to_port = from_port

        # Peer and the rule's stable identifier go together.
        if prefix_list is not None:
            peer = _ec2.Peer.prefix_list(prefix_list)
            source = 'prefixlist'
        else:
            peer = _ec2.Peer.ipv4(cidr_range)
            source = cidr_range
        rule_id = f'{sg_id}_{protocol.name}_{source}_{from_port}_{to_port}'

        connection = _ec2.Port(string_representation=rule_id,
                               protocol=protocol,
                               from_port=from_port,
                               to_port=to_port)
        group = self._security_groups[sg_id]
        add_rule = group.add_ingress_rule if is_ingress else group.add_egress_rule
        add_rule(peer=peer, connection=connection, description=description)
コード例 #8
0
    def __configure_neptune(self) -> None:
        """Provision the PortfolioMgmt Neptune cluster.

        Creates a subnet group over the 'PortfolioMgmt' subnets, a security
        group open to any IPv4 on the Neptune port (8182), the cluster, and
        one db.t3.medium instance per subnet's availability zone.
        """
        # NOTE(review): _select_subnet_objects is a private Vpc API — confirm
        # a public SubnetSelection can't serve here.
        portfolio_subnets = list(
            self.vpc._select_subnet_objects(
                subnet_group_name='PortfolioMgmt'))

        self.subnet_group = n.CfnDBSubnetGroup(
            self,
            'SubnetGroup',
            db_subnet_group_description='Portfolio Management',
            db_subnet_group_name='portfoliomgmtsubnetgroup',
            subnet_ids=[subnet.subnet_id for subnet in portfolio_subnets])

        self.security_group = ec2.SecurityGroup(
            self,
            'SecGroup',
            vpc=self.vpc,
            allow_all_outbound=True,
            description='Security group for PortfolioMgmt feature')
        # Neptune listens on 8182; open to any IPv4 source.
        self.security_group.add_ingress_rule(
            peer=ec2.Peer.any_ipv4(),
            connection=ec2.Port(protocol=ec2.Protocol.TCP,
                                string_representation='Neptune',
                                from_port=8182,
                                to_port=8182))

        self.neptune_cluster = n.CfnDBCluster(
            self,
            'NeptuneCluster',
            db_subnet_group_name=self.subnet_group.db_subnet_group_name,
            deletion_protection=False,
            iam_auth_enabled=False,
            storage_encrypted=True,
            db_cluster_identifier='portfoliomgmt',
            vpc_security_group_ids=[self.security_group.security_group_id])

        # One instance per subnet, numbered from 1; the attribute keeps the
        # last one created, matching the original behavior.
        for counter, subnet in enumerate(portfolio_subnets, start=1):
            self.neptune_instance = n.CfnDBInstance(
                self,
                f'NeptuneInstance-{counter}',
                availability_zone=subnet.availability_zone,
                db_instance_identifier=f'portmgmt-instance-{counter}',
                db_instance_class='db.t3.medium',
                allow_major_version_upgrade=False,
                auto_minor_version_upgrade=True,
                db_cluster_identifier=self.neptune_cluster.
                db_cluster_identifier,
                db_subnet_group_name=self.subnet_group.db_subnet_group_name)
コード例 #9
0
 def addSg(self, sg, cidr, proto, string_representation, from_port,
           to_port):
     """Add an ingress rule to *sg* for the given CIDR and port range.

     :param proto: "TCP", "UDP" or "ICMP" (case-sensitive).
     :raises ValueError: if *proto* is not one of the supported names.
     """
     # FIX: the original if/elif chain left `protocol` unbound for any
     # other value, raising a confusing NameError on the next line.
     protocols = {
         "TCP": ec2.Protocol.TCP,
         "UDP": ec2.Protocol.UDP,
         "ICMP": ec2.Protocol.ICMP,
     }
     try:
         protocol = protocols[proto]
     except KeyError:
         raise ValueError(f"Unsupported protocol: {proto!r}") from None
     sg.add_ingress_rule(peer=ec2.Peer.ipv4(cidr),
                         connection=ec2.Port(
                             protocol=protocol,
                             string_representation=string_representation,
                             from_port=from_port,
                             to_port=to_port))
コード例 #10
0
    def open_port(self, port: int, peer: IPeer, ingress: bool = True) -> None:
        """
        Modifies a given security group by opening a specified port.

        :param port: Port to open (allow traffic).
        :param peer: Peer (a CIDR or another security group).
        :param ingress: Specifies whether it is configured for ingress or egress traffic.

        :return: No return.
        """
        assert port is not None
        assert peer is not None
        assert ingress is not None

        sg = self.__security_group

        # Both directions use the same TCP single-port rule; only the label
        # and the add_* method differ.
        direction = 'Ingress' if ingress else 'Egress'
        rule = aws_ec2.Port(
            protocol=aws_ec2.Protocol.TCP,
            string_representation=f'{direction} {port} rule.',
            from_port=port,
            to_port=port
        )
        add_rule = sg.add_ingress_rule if ingress else sg.add_egress_rule
        add_rule(peer=peer, connection=rule)
コード例 #11
0
    def __init__(self, scope: core.Stack, id: str, **kwargs):
        """Frontend service (3 tasks) behind a public ALB with CPU autoscaling.

        Builds on the shared BasePlatform, grants the task role permission
        to describe subnets, opens TCP 3000 toward the backend security
        group, and scales 1-10 tasks on 50% CPU utilization.
        """
        super().__init__(scope, id, **kwargs)

        self.base_platform = BasePlatform(self, self.stack_name)

        # Backend endpoints resolve through Cloud Map; REGION comes from the
        # deploying shell's environment.
        container_env = {
            "CRYSTAL_URL": "http://ecsdemo-crystal.service:3000/crystal",
            "NODEJS_URL": "http://ecsdemo-nodejs.service:3000",
            "REGION": getenv('AWS_DEFAULT_REGION'),
        }
        self.fargate_task_image = aws_ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
            container_port=3000,
            image=aws_ecs.ContainerImage.from_registry("adam9098/ecsdemo-frontend"),
            environment=container_env,
        )

        self.fargate_load_balanced_service = aws_ecs_patterns.ApplicationLoadBalancedFargateService(
            self,
            "FrontendFargateLBService",
            service_name='ecsdemo-frontend',
            cluster=self.base_platform.ecs_cluster,
            cloud_map_options=self.base_platform.sd_namespace,
            task_image_options=self.fargate_task_image,
            cpu=256,
            memory_limit_mib=512,
            desired_count=3,
            public_load_balancer=True)

        # The app inspects EC2 subnets at runtime.
        self.fargate_load_balanced_service.task_definition.add_to_task_role_policy(
            aws_iam.PolicyStatement(actions=['ec2:DescribeSubnets'],
                                    resources=['*']))

        # Frontend may open TCP 3000 toward the shared backend group.
        backend_port = aws_ec2.Port(protocol=aws_ec2.Protocol.TCP,
                                    string_representation="frontendtobackend",
                                    from_port=3000,
                                    to_port=3000)
        self.fargate_load_balanced_service.service.connections.allow_to(
            self.base_platform.services_sec_grp, port_range=backend_port)

        # Enable Service Autoscaling: 1-10 tasks, 50% CPU target, 30s cooldowns.
        self.autoscale = self.fargate_load_balanced_service.service.auto_scale_task_count(
            min_capacity=1, max_capacity=10)
        self.autoscale.scale_on_cpu_utilization(
            "CPUAutoscaling",
            target_utilization_percent=50,
            scale_in_cooldown=core.Duration.seconds(30),
            scale_out_cooldown=core.Duration.seconds(30))
コード例 #12
0
ファイル: vpce.py プロジェクト: dr-natetorious/aws-homenet
  def __init__(self, scope: core.Construct, id: str, vpc: ec2.IVpc, **kwargs) -> None:
    """Security group for AWS resources living in the isolated subnet."""
    super().__init__(scope, id, **kwargs)

    self.vpc = vpc

    # Outbound is unrestricted; inbound accepts any IPv4 source on any
    # port/protocol (Protocol.ALL ignores port arguments).
    self.security_group = ec2.SecurityGroup(
      self, 'EndpointSecurity',
      vpc=vpc,
      allow_all_outbound=True,
      description='SG for AWS Resources in isolated subnet')
    any_traffic = ec2.Port(
      protocol=ec2.Protocol.ALL,
      string_representation='Any source')
    self.security_group.add_ingress_rule(
      peer=ec2.Peer.any_ipv4(),
      connection=any_traffic)
コード例 #13
0
    def __init__(self, scope: core.Construct, id: str, nw_stack: core.Stack,
                 **kwargs) -> None:
        """Bastion host (public subnet) plus a test instance (isolated subnet).

        SSH is allowed from EC2Stack.SSH_IP to the bastion, and from the
        bastion's security group to the test instance.
        """
        super().__init__(scope, id, **kwargs)

        ec2_sg = _ec2.SecurityGroup(self,
                                    id='test-ec2-instance-sg',
                                    vpc=nw_stack.app_vpc)

        bastion_sg = _ec2.SecurityGroup(self,
                                        id='bastion-sg',
                                        vpc=nw_stack.app_vpc)

        def _launch(construct_id, instance_name, sg, subnet_type):
            # Both hosts share the same nano Amazon Linux configuration.
            return _ec2.Instance(self,
                                 id=construct_id,
                                 instance_type=_ec2.InstanceType('t3a.nano'),
                                 machine_image=_ec2.AmazonLinuxImage(),
                                 key_name=EC2Stack.KEY_PAIR,
                                 security_group=sg,
                                 instance_name=instance_name,
                                 vpc=nw_stack.app_vpc,
                                 vpc_subnets=_ec2.SubnetSelection(
                                     subnet_type=subnet_type))

        _launch('tgw_poc_instance', 'tgw_nat_test_instance', ec2_sg,
                _ec2.SubnetType.ISOLATED)
        _launch('tgw_poc_bastion', 'tgw_test_bastion', bastion_sg,
                _ec2.SubnetType.PUBLIC)

        ssh = _ec2.Port(protocol=_ec2.Protocol.TCP,
                        string_representation="tcp_22",
                        from_port=EC2Stack.SSH_PORT,
                        to_port=EC2Stack.SSH_PORT)

        bastion_sg.add_ingress_rule(peer=_ec2.Peer.ipv4(EC2Stack.SSH_IP),
                                    connection=ssh,
                                    description='Allow SSH access from SSH_IP')

        ec2_sg.add_ingress_rule(
            peer=bastion_sg,
            connection=ssh,
            description='Allow SSH access from bastion host')
コード例 #14
0
    def __init__(self, scope: core.Construct, id: str, bmt_vpc: ec2.Vpc,
                 **kwargs) -> None:
        """Single-instance Aurora MySQL cluster in the VPC's isolated subnets."""
        super().__init__(scope, id, **kwargs)

        # Subnet group pinned to the isolated subnets; removed with the stack.
        subnet_group = rds.SubnetGroup(
            self,
            'Aurora',
            description='aurora subnet group',
            vpc=bmt_vpc,
            removal_policy=core.RemovalPolicy.DESTROY,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.ISOLATED))

        # MySQL (3306) reachable from inside 10.100.0.0/16 only.
        aurora_sg = ec2.SecurityGroup(self, 'aurora-sg', vpc=bmt_vpc)
        aurora_sg.add_ingress_rule(
            peer=ec2.Peer.ipv4('10.100.0.0/16'),
            connection=ec2.Port(
                protocol=ec2.Protocol.TCP,
                string_representation="to allow from the vpc internal",
                from_port=3306,
                to_port=3306))

        # Custom parameter group turning on the performance schema.
        params = rds.ParameterGroup(
            self,
            'bmt-aurora-param',
            engine=rds.DatabaseClusterEngine.AURORA_MYSQL)
        params.add_parameter("performance_schema", "1")

        rds.DatabaseCluster(
            self,
            'bmt-aurora-cluster',
            engine=rds.DatabaseClusterEngine.aurora_mysql(
                version=rds.AuroraMysqlEngineVersion.VER_2_07_1),
            instances=1,
            instance_props=rds.InstanceProps(
                vpc=bmt_vpc,
                instance_type=ec2.InstanceType.of(
                    instance_class=ec2.InstanceClass.BURSTABLE3,
                    instance_size=ec2.InstanceSize.MEDIUM),
                security_groups=[aurora_sg]),
            subnet_group=subnet_group,
            parameter_group=params,
            removal_policy=core.RemovalPolicy.DESTROY)
コード例 #15
0
    def __init__(self, scope: core.Construct, id: str, vpc: aws_ec2.Vpc,
                 **kwargs) -> None:
        """Source Postgres instance in two dedicated /24 subnets.

        Exposes the generated secret's name/ARN via attributes and a stack
        output.
        """
        super().__init__(scope, id, **kwargs)

        # Two dedicated subnets for the source DB, one per AZ:
        # sbn-sourcedb-1 -> 10.0.1.0/24, sbn-sourcedb-2 -> 10.0.2.0/24.
        db_subnets = [
            aws_ec2.Subnet(self,
                           f'sbn-sourcedb-{index}',
                           availability_zone=vpc.availability_zones[index - 1],
                           vpc_id=vpc.vpc_id,
                           cidr_block=f'10.0.{index}.0/24')
            for index in (1, 2)
        ]

        # Postgres (5432) reachable only from the command subnet range.
        db_sg = aws_ec2.SecurityGroup(self, 'sg-src-db', vpc=vpc)
        db_sg.add_ingress_rule(
            peer=aws_ec2.Peer.ipv4('10.0.5.192/26'),
            connection=aws_ec2.Port(
                protocol=aws_ec2.Protocol.TCP,
                string_representation="to allow traffic from the cmd",
                from_port=5432,
                to_port=5432))

        postgres = rds.DatabaseInstance(
            self,
            'src_ora',
            engine=rds.DatabaseInstanceEngine.POSTGRES,
            vpc=vpc,
            vpc_subnets=aws_ec2.SubnetSelection(subnets=db_subnets),
            security_groups=[db_sg])

        core.CfnOutput(self,
                       'secret_name',
                       value=postgres.secret.secret_name)

        self._secret_name = postgres.secret.secret_name
        self._secret_arn = postgres.secret.secret_arn
コード例 #16
0
    def __init__(self, scope: core.Construct, id: str, vpc: ec2.IVpc,
                 **kwargs) -> None:
        """VPC endpoints for an isolated subnet, behind one shared security group.

        Creates gateway endpoints for S3/DynamoDB and interface endpoints
        for the AWS services the subnet must reach without internet access.
        """
        super().__init__(scope, id, **kwargs)

        # One shared group: all outbound allowed, any IPv4 source inbound.
        self.security_group = ec2.SecurityGroup(
            self,
            'EndpointSecurity',
            vpc=vpc,
            allow_all_outbound=True,
            description='SG for AWS Resources in isolated subnet')
        self.security_group.add_ingress_rule(
            peer=ec2.Peer.any_ipv4(),
            connection=ec2.Port(protocol=ec2.Protocol.ALL,
                                string_representation='Any source'))

        # Gateway endpoints are route-table based (no ENI, no SG needed).
        self.gateways = {
            svc: ec2.GatewayVpcEndpoint(
                self,
                svc,
                vpc=vpc,
                service=ec2.GatewayVpcEndpointAwsService(name=svc))
            for svc in ('s3', 'dynamodb')
        }

        # Interface endpoints (one ENI per AZ) share the security group.
        interface_services = [
            'ssm', 'ec2messages', 'ec2', 'ssmmessages', 'kms',
            'elasticloadbalancing', 'elasticfilesystem', 'lambda',
            'states', 'events', 'execute-api', 'kinesis-streams',
            'kinesis-firehose', 'logs', 'sns', 'sqs', 'secretsmanager',
            'config', 'ecr.api', 'ecr.dkr'
        ]
        self.interfaces = {
            svc: ec2.InterfaceVpcEndpoint(
                self,
                svc,
                vpc=vpc,
                service=ec2.InterfaceVpcEndpointAwsService(name=svc),
                open=True,
                private_dns_enabled=True,
                lookup_supported_azs=False,
                security_groups=[self.security_group])
            for svc in interface_services
        }
コード例 #17
0
ファイル: more.py プロジェクト: bilardi/aws-tool-comparison
 def get_security_group(self, ec2_params):
     """Return the security group described by *ec2_params*.

     Resolution order: an explicit 'security_group' object, a lookup by
     a non-empty 'security_group_id' (imported read-only), or a freshly
     created group opening the given UDP port range to any IPv4 source.
     """
     if 'security_group' in ec2_params:
         return ec2_params['security_group']

     if ec2_params.get('security_group_id'):
         # Imported by id; mutable=False keeps CDK from editing its rules.
         return ec2.SecurityGroup.from_security_group_id(
             self,
             "SecurityGroup",
             security_group_id=ec2_params['security_group_id'],
             mutable=False)

     security_group = ec2.SecurityGroup(self,
                                        "SecurityGroup",
                                        vpc=ec2_params['vpc'])
     security_group.add_ingress_rule(
         peer=ec2.Peer.any_ipv4(),
         connection=ec2.Port(string_representation="sr",
                             protocol=ec2.Protocol("UDP"),
                             from_port=ec2_params['from_port'],
                             to_port=ec2_params['to_port']))
     return security_group
    def __init__(self, scope: core.Construct, id: str, config_dict,
                 **kwargs) -> None:
        """Provision the datalake AWS Batch foundation.

        Creates a security group, the ECS-instance / spot-fleet / Batch
        service IAM roles, and a SPOT compute environment spanning two
        pre-existing subnets. Each created resource's name is exported as a
        stack output.

        Args:
            scope: Parent construct.
            id: Construct id.
            config_dict: Expects keys 'vpc_id', 'SubnetIds' (comma-separated,
                at least two) and 'AvailabilityZones' (comma-separated, same
                order as SubnetIds).
        """
        super().__init__(scope, id, **kwargs)
        """ Get VPC details """
        vpc = ec2.Vpc.from_lookup(self, "VPC", vpc_id=config_dict['vpc_id'])
        """ Create Security Group for Batch Env """
        batch_security_group = "datalake-batch-security-group"

        createBatchSecurityGroup = ec2.SecurityGroup(
            self,
            "createBatchSecurityGroup",
            vpc=vpc,
            allow_all_outbound=True,
            description=
            "This security group will be used for AWS Batch Compute Env",
            security_group_name=batch_security_group)

        # NOTE(review): SSH open to 0.0.0.0/0 — confirm this exposure is
        # intended for the Batch compute instances.
        createBatchSecurityGroup.add_ingress_rule(
            peer=ec2.Peer.ipv4("0.0.0.0/0"),
            connection=ec2.Port(protocol=ec2.Protocol.TCP,
                                string_representation="ingress_rule",
                                from_port=22,
                                to_port=22))

        # NOTE(review): -1/-1 mirrors the EC2 "all ports" convention, but
        # combined with Protocol.TCP this egress rule is unusual — verify it
        # renders as intended (outbound is already allow_all_outbound=True).
        createBatchSecurityGroup.add_egress_rule(
            peer=ec2.Peer.ipv4("0.0.0.0/0"),
            connection=ec2.Port(protocol=ec2.Protocol.TCP,
                                string_representation="egress_rule",
                                from_port=-1,
                                to_port=-1))

        core.CfnOutput(self,
                       "createBatchSecurityGroupId",
                       value=createBatchSecurityGroup.security_group_id)
        """ Create IAM Role for ecsInstance """
        # Broad managed policies (EC2/S3/Batch/Athena full access) for the
        # container instances that execute the Batch jobs.
        createECSInstanceRole = iam.Role(
            self,
            "createECSInstanceRole",
            assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"),
            description=
            "This instance role will be used by the ECS cluster instances",
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonEC2FullAccess"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonS3FullAccess"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AWSBatchFullAccess"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "SecretsManagerReadWrite"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonAthenaFullAccess"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/"
                    "AmazonEC2ContainerServiceforEC2Role"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AWSBatchServiceRole")
            ],
            role_name="datalake-ecsinstance-role")

        # EC2 consumes the role through an instance profile, not directly.
        createInstanceProfile = iam.CfnInstanceProfile(
            self,
            "createInstanceProfile",
            roles=[createECSInstanceRole.role_name],
            instance_profile_name="datalake-ecsinstance-role")

        # NOTE(review): this is the profile *name*; confirm the CDK version
        # in use accepts a name (rather than an ARN) for instance_role below.
        useECSInstanceProfile = createInstanceProfile.instance_profile_name

        core.CfnOutput(self,
                       "createECSInstanceRoleName",
                       value=createECSInstanceRole.role_name)
        """ Create Spot Fleet Role """
        createSpotFleetRole = iam.Role(
            self,
            'createSpotFleetRole',
            assumed_by=iam.ServicePrincipal("spotfleet.amazonaws.com"),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AmazonEC2SpotFleetTaggingRole")
            ])

        core.CfnOutput(self,
                       "createSpotFleetRoleName",
                       value=createSpotFleetRole.role_name)

        # Immutable view of the role so later constructs can't attach
        # additional policies to it.
        useSpotFleetRole = createSpotFleetRole.without_policy_updates()
        """ Create Batch Service Role """
        createBatchServiceRole = iam.Role(
            self,
            'createBatchServiceRole',
            assumed_by=iam.ServicePrincipal("batch.amazonaws.com"),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AWSBatchServiceRole")
            ])

        core.CfnOutput(self,
                       "createBatchServiceRoleName",
                       value=createBatchServiceRole.role_name)

        useBatchServiceRole = createBatchServiceRole.without_policy_updates()
        """ Create Compute Environment """

        # Subnets/AZs are paired positionally from the two config lists.
        subnet_1 = ec2.Subnet.from_subnet_attributes(
            self,
            "subnet_1",
            subnet_id=config_dict['SubnetIds'].split(",")[0],
            availability_zone=config_dict['AvailabilityZones'].split(",")[0])
        subnet_2 = ec2.Subnet.from_subnet_attributes(
            self,
            "subnet_2",
            subnet_id=config_dict['SubnetIds'].split(",")[1],
            availability_zone=config_dict['AvailabilityZones'].split(",")[1])

        # SPOT environment bidding at most 60% of on-demand, scaling 0-100 vCPUs.
        # NOTE(review): core.Tag.add(...) looks like it applies tags to the
        # stack and returns None, so compute_resources_tags is likely None at
        # synth time — confirm the intended tagging mechanism.
        createBatchComputeEnv = batch.ComputeEnvironment(
            self,
            "createBatchComputeEnv",
            compute_environment_name="datalake-compute-env",
            service_role=useBatchServiceRole,
            compute_resources=batch.ComputeResources(
                vpc=vpc,
                type=batch.ComputeResourceType.SPOT,
                bid_percentage=60,
                desiredv_cpus=0,
                maxv_cpus=100,
                minv_cpus=0,
                security_groups=[createBatchSecurityGroup],
                vpc_subnets=ec2.SubnetSelection(subnets=[subnet_1, subnet_2]),
                instance_role=useECSInstanceProfile,
                spot_fleet_role=useSpotFleetRole,
                compute_resources_tags=core.Tag.add(
                    self, 'Name', 'Datalake Pipeline Instance')))

        core.CfnOutput(self,
                       "createBatchComputeEnvName",
                       value=createBatchComputeEnv.compute_environment_name)

        getIComputeEnvObject = batch.ComputeEnvironment.from_compute_environment_arn(
            self,
            "getComputeEnvAtrributes",
            compute_environment_arn=createBatchComputeEnv.
            compute_environment_arn)
        """ Create Batch Job Queue """
        createBatchJobQueue = batch.JobQueue(
            self,
            "createBatchJobQueue",
            compute_environments=[
                batch.JobQueueComputeEnvironment(
                    compute_environment=getIComputeEnvObject, order=1)
            ],
            enabled=True,
            job_queue_name="datalake-job-queue",
            priority=1)

        core.CfnOutput(self,
                       "createBatchJobQueueName",
                       value=createBatchJobQueue.job_queue_name)
        """ Create ECR Repo for datalake images """
        createECRRepo = ecr.Repository(
            self,
            "createECRRepo",
            repository_name=config_dict['workflow_ecr_repo'])

        core.CfnOutput(self,
                       "createECRRepoName",
                       value=createECRRepo.repository_name)
コード例 #19
0
    def __init__(self, app: core.App, id: str, **kwargs) -> None:
        """Provision a complete Apache Airflow deployment on ECS Fargate.

        Resources created:
          * a VPC, an RDS Postgres instance (Airflow metadata backend),
            and an ElastiCache Redis cluster (Celery task queue);
          * four Fargate services — scheduler, flower, worker, web
            server — all pulling the same image tag from a pre-existing
            ECR repository named ``airflow``;
          * an internet-facing ALB fronting the web server and flower,
            with security-group rules wiring every component together.

        NOTE(review): the Postgres credentials and the Fernet key are
        hard-coded in the container environments below; they should be
        sourced from Secrets Manager.
        """
        super().__init__(app, id, **kwargs)

        # -- Networking
        vpc = ec2.Vpc(self, "vpc_airflow")

        # Pre-existing ECR repository holding the Airflow image.
        ecr_repo = ecr.Repository.from_repository_name(self,
                                                       "ecr_repo_airflow",
                                                       "airflow")

        # -- RDS Postgres metadata backend
        sg_airflow_backend_db = ec2.SecurityGroup(
            self,
            "sg_airflow_backend_database",
            vpc=vpc,
            description="Airflow backend database",
            security_group_name="sg_airflow_backend_database",
        )
        db = rds.DatabaseInstance(
            self,
            "rds_airfow_backend",
            master_username="******",
            master_user_password=core.SecretValue.plain_text("postgres"),
            database_name="airflow",
            engine=rds.DatabaseInstanceEngine.postgres(
                version=rds.PostgresEngineVersion.VER_11_8),
            vpc=vpc,
            instance_type=ec2.InstanceType.of(
                ec2.InstanceClass.BURSTABLE3,
                ec2.InstanceSize.MICRO,
            ),
            instance_identifier="airflow-backend",
            removal_policy=core.RemovalPolicy.DESTROY,
            deletion_protection=False,
            security_groups=[sg_airflow_backend_db],
            vpc_placement=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PUBLIC),
        )

        # -- ElastiCache Redis task queue
        sg_redis = ec2.SecurityGroup(
            self,
            "sg_redis",
            vpc=vpc,
            description="Airflow redis",
            security_group_name="sg_redis",
        )
        redis_subnet_group = ec.CfnSubnetGroup(
            self,
            "airflow-redis-subnet-group",
            description="For Airflow Task Queue",
            subnet_ids=vpc.select_subnets(
                subnet_type=ec2.SubnetType.PRIVATE).subnet_ids,
            cache_subnet_group_name="airflow-redis-task-queue",
        )
        redis = ec.CfnCacheCluster(
            self,
            "redis",
            cluster_name="airflow-redis",
            cache_node_type="cache.t2.micro",
            engine="redis",
            num_cache_nodes=1,
            auto_minor_version_upgrade=True,
            engine_version="5.0.6",
            port=REDIS_PORT,
            cache_subnet_group_name=redis_subnet_group.ref,
            vpc_security_group_ids=[sg_redis.security_group_id],
        )

        # -- ECS cluster shared by all Airflow services
        cluster = ecs.Cluster(
            self,
            "ecs_airflow",
            cluster_name="airflow",
            vpc=vpc,
            container_insights=True,
        )

        # -- Scheduler service
        scheduler_task_role = iam.Role(
            self,
            "iam_role_scheduler",
            assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
            description="IAM role for ECS Scheduler service",
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonEC2ContainerRegistryReadOnly"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "CloudWatchLogsFullAccess"),
            ],
            role_name="airflow-ecs-scheduler-task",
        )
        scheduler_task = ecs.FargateTaskDefinition(
            self,
            "ecs_task_scheduler",
            cpu=512,
            memory_limit_mib=2048,
            task_role=scheduler_task_role,
        )
        scheduler_task.add_container(
            "scheduler",
            command=["scheduler"],
            # credentials should be provided from Secrets Manager
            environment={
                "LOAD_EX": "n",
                "FERNET_KEY": "46BKJoQYlPPOexq0OhDZnIlNepKFf87WFwLbfzqDDho=",
                "EXECUTOR": "Celery",
                "POSTGRES_HOST": db.db_instance_endpoint_address,
                "POSTGRES_USER": "******",
                "POSTGRES_PASSWORD": "******",
                "POSTGRES_DB": "airflow",
                "REDIS_HOST": redis.attr_redis_endpoint_address,
            },
            image=ecs.ContainerImage.from_ecr_repository(
                ecr_repo,
                "1.10.9",
            ),
            logging=ecs.LogDriver.aws_logs(
                stream_prefix="scheduler",
                log_group=logs.LogGroup(
                    self,
                    "log-airflow-scheduler",
                    log_group_name="ecs/airflow/scheduler",
                    retention=logs.RetentionDays.ONE_WEEK,
                ),
            ),
        )
        sg_airflow_scheduler = ec2.SecurityGroup(
            self,
            "sg_airflow_scheduler",
            vpc=vpc,
            description="Airflow Scheduler service",
            security_group_name="sg_airflow_scheduler",
        )
        # scheduler -> Redis
        sg_redis.add_ingress_rule(
            peer=sg_airflow_scheduler,
            connection=ec2.Port(
                protocol=ec2.Protocol.TCP,
                string_representation="from scheduler",
                from_port=REDIS_PORT,
                to_port=REDIS_PORT,
            ),
            description="from scheduler service",
        )
        # scheduler -> Postgres
        # fix: this rule was previously mislabeled "from home".
        sg_airflow_backend_db.add_ingress_rule(
            peer=sg_airflow_scheduler,
            connection=ec2.Port(
                protocol=ec2.Protocol.TCP,
                string_representation="from scheduler",
                from_port=POSTGRES_PORT,
                to_port=POSTGRES_PORT,
            ),
            description="from scheduler service",
        )
        ecs.FargateService(
            self,
            "ecs_service_scheduler",
            cluster=cluster,
            task_definition=scheduler_task,
            desired_count=1,
            security_groups=[sg_airflow_scheduler],
            service_name="scheduler",
        )

        # -- Flower (Celery monitoring UI) service
        flower_task_role = iam.Role(
            self,
            "iam_role_flower",
            assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
            description="IAM role for ECS Flower service",
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonEC2ContainerRegistryReadOnly"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "CloudWatchLogsFullAccess"),
            ],
            role_name="airflow-ecs-flower-task",
        )
        flower_task = ecs.FargateTaskDefinition(
            self,
            "ecs_task_flower",
            cpu=512,
            memory_limit_mib=1024,
            # fix: previously (incorrectly) used scheduler_task_role,
            # leaving flower_task_role unattached.
            task_role=flower_task_role,
        )
        flower_task.add_container(
            "flower",
            command=["flower"],
            # credentials should be provided from Secrets Manager
            environment={
                "LOAD_EX": "n",
                "FERNET_KEY": "46BKJoQYlPPOexq0OhDZnIlNepKFf87WFwLbfzqDDho=",
                "EXECUTOR": "Celery",
                "REDIS_HOST": redis.attr_redis_endpoint_address,
            },
            image=ecs.ContainerImage.from_ecr_repository(
                ecr_repo,
                "1.10.9",
            ),
            logging=ecs.LogDriver.aws_logs(
                stream_prefix="flower",
                log_group=logs.LogGroup(
                    self,
                    "log-airflow-flower",
                    log_group_name="ecs/airflow/flower",
                    retention=logs.RetentionDays.ONE_WEEK,
                ),
            ),
        ).add_port_mappings(
            ecs.PortMapping(
                container_port=FLOWER_PORT,
                host_port=FLOWER_PORT,
                protocol=ecs.Protocol.TCP,
            ))
        sg_airflow_flower = ec2.SecurityGroup(
            self,
            "sg_airflow_flower",
            vpc=vpc,
            description="Airflow Flower service",
            security_group_name="sg_airflow_flower",
        )
        # home -> flower (direct access, in addition to the ALB route)
        # fix: use the MY_IP_CIDR constant instead of a hard-coded IP,
        # and correct the "from homr" typo.
        sg_airflow_flower.add_ingress_rule(
            peer=ec2.Peer.ipv4(MY_IP_CIDR),
            connection=ec2.Port(
                protocol=ec2.Protocol.TCP,
                string_representation="from home",
                from_port=FLOWER_PORT,
                to_port=FLOWER_PORT,
            ),
            description="from home",
        )
        # flower -> Redis
        sg_redis.add_ingress_rule(
            peer=sg_airflow_flower,
            connection=ec2.Port(
                protocol=ec2.Protocol.TCP,
                string_representation="from flower",
                from_port=REDIS_PORT,
                to_port=REDIS_PORT,
            ),
            description="from flower",
        )
        flower_service = ecs.FargateService(
            self,
            "ecs_service_flower",
            cluster=cluster,
            task_definition=flower_task,
            desired_count=1,
            security_groups=[sg_airflow_flower],
            service_name="flower",
        )

        # -- Worker service
        worker_task_role = iam.Role(
            self,
            "iam_role_worker",
            assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
            description="IAM role for ECS worker service",
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonEC2ContainerRegistryReadOnly"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "CloudWatchLogsFullAccess"),
            ],
            role_name="airflow-ecs-worker-task",
        )
        worker_task = ecs.FargateTaskDefinition(
            self,
            "ecs_task_worker",
            cpu=1024,
            memory_limit_mib=3072,
            task_role=worker_task_role,
        )
        worker_task.add_container(
            "worker",
            command=["worker"],
            # credentials should be provided from Secrets Manager
            environment={
                "LOAD_EX": "n",
                "FERNET_KEY": "46BKJoQYlPPOexq0OhDZnIlNepKFf87WFwLbfzqDDho=",
                "EXECUTOR": "Celery",
                "POSTGRES_HOST": db.db_instance_endpoint_address,
                "POSTGRES_USER": "******",
                "POSTGRES_PASSWORD": "******",
                "POSTGRES_DB": "airflow",
                "REDIS_HOST": redis.attr_redis_endpoint_address,
            },
            image=ecs.ContainerImage.from_ecr_repository(
                ecr_repo,
                "1.10.9",
            ),
            logging=ecs.LogDriver.aws_logs(
                stream_prefix="worker",
                log_group=logs.LogGroup(
                    self,
                    "log-airflow-worker",
                    log_group_name="ecs/airflow/worker",
                    retention=logs.RetentionDays.ONE_WEEK,
                ),
            ),
        )
        sg_airflow_worker = ec2.SecurityGroup(
            self,
            "sg_airflow_worker",
            vpc=vpc,
            description="Airflow worker service",
            security_group_name="sg_airflow_worker",
        )
        # worker -> Redis
        sg_redis.add_ingress_rule(
            peer=sg_airflow_worker,
            connection=ec2.Port(
                protocol=ec2.Protocol.TCP,
                string_representation="from worker",
                from_port=REDIS_PORT,
                to_port=REDIS_PORT,
            ),
            description="from worker service",
        )
        # worker -> Postgres
        sg_airflow_backend_db.add_ingress_rule(
            peer=sg_airflow_worker,
            connection=ec2.Port(
                protocol=ec2.Protocol.TCP,
                string_representation="from worker",
                from_port=POSTGRES_PORT,
                to_port=POSTGRES_PORT,
            ),
            description="From worker",
        )
        ecs.FargateService(
            self,
            "ecs_service_worker",
            cluster=cluster,
            task_definition=worker_task,
            desired_count=1,
            security_groups=[sg_airflow_worker],
            service_name="worker",
        )

        # -- Web server service
        web_server_task_role = iam.Role(
            self,
            "iam_role_web_server",
            assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
            description="IAM role for ECS web server service",
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonEC2ContainerRegistryReadOnly"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "CloudWatchLogsFullAccess"),
            ],
            role_name="airflow-ecs-web-server-task",
        )
        web_server_task = ecs.FargateTaskDefinition(
            self,
            "ecs_task_web_server",
            cpu=512,
            memory_limit_mib=1024,
            task_role=web_server_task_role,
        )
        web_server_task.add_container(
            "web_server",
            command=["webserver"],
            # credentials should be provided from Secrets Manager
            environment={
                "LOAD_EX": "n",
                "FERNET_KEY": "46BKJoQYlPPOexq0OhDZnIlNepKFf87WFwLbfzqDDho=",
                "EXECUTOR": "Celery",
                "POSTGRES_HOST": db.db_instance_endpoint_address,
                "POSTGRES_USER": "******",
                "POSTGRES_PASSWORD": "******",
                "POSTGRES_DB": "airflow",
                "REDIS_HOST": redis.attr_redis_endpoint_address,
            },
            image=ecs.ContainerImage.from_ecr_repository(
                ecr_repo,
                "1.10.9",
            ),
            logging=ecs.LogDriver.aws_logs(
                stream_prefix="web_server",
                log_group=logs.LogGroup(
                    self,
                    "log-airflow-web-server",
                    log_group_name="ecs/airflow/web-server",
                    retention=logs.RetentionDays.ONE_WEEK,
                ),
            ),
        ).add_port_mappings(
            ecs.PortMapping(
                container_port=WEB_SERVER_PORT,
                host_port=WEB_SERVER_PORT,
                protocol=ecs.Protocol.TCP,
            ))
        sg_airflow_web_server = ec2.SecurityGroup(
            self,
            "sg_airflow_web_server",
            vpc=vpc,
            description="Airflow web server service",
            security_group_name="sg_airflow_web_server",
        )
        # web server -> Postgres
        # fix: this rule was previously added twice verbatim.
        sg_airflow_backend_db.add_ingress_rule(
            peer=sg_airflow_web_server,
            connection=ec2.Port(
                protocol=ec2.Protocol.TCP,
                string_representation="From web server",
                from_port=POSTGRES_PORT,
                to_port=POSTGRES_PORT,
            ),
            description="From web server",
        )
        # web server -> Redis
        sg_redis.add_ingress_rule(
            peer=sg_airflow_web_server,
            connection=ec2.Port(
                protocol=ec2.Protocol.TCP,
                string_representation="from web server",
                from_port=REDIS_PORT,
                to_port=REDIS_PORT,
            ),
            description="from web server",
        )
        web_server_service = ecs.FargateService(
            self,
            "ecs_service_web_server",
            cluster=cluster,
            task_definition=web_server_task,
            desired_count=1,
            security_groups=[sg_airflow_web_server],
            service_name="web_server",
        )

        # -- Load balancer
        sg_airflow_alb = ec2.SecurityGroup(
            self,
            "sg_airflow_alb",
            vpc=vpc,
            description="Airflow ALB",
            security_group_name="sg_airflow_alb",
        )
        # ALB -> web server
        sg_airflow_web_server.add_ingress_rule(
            peer=sg_airflow_alb,
            connection=ec2.Port(
                protocol=ec2.Protocol.TCP,
                string_representation="From ALB",
                from_port=WEB_SERVER_PORT,
                to_port=WEB_SERVER_PORT,
            ),
            description="From ALB",
        )
        # ALB -> flower
        sg_airflow_flower.add_ingress_rule(
            peer=sg_airflow_alb,
            connection=ec2.Port(
                protocol=ec2.Protocol.TCP,
                string_representation="From ALB",
                from_port=FLOWER_PORT,
                to_port=FLOWER_PORT,
            ),
            description="From ALB",
        )
        # Home -> ALB (web server listener port)
        sg_airflow_alb.add_ingress_rule(
            peer=ec2.Peer.ipv4(MY_IP_CIDR),
            connection=ec2.Port(
                protocol=ec2.Protocol.TCP,
                string_representation="From Home",
                from_port=ALB_PORT,
                to_port=ALB_PORT,
            ),
            description="From Home",
        )
        # Home -> ALB (flower listener port)
        sg_airflow_alb.add_ingress_rule(
            peer=ec2.Peer.ipv4(MY_IP_CIDR),
            connection=ec2.Port(
                protocol=ec2.Protocol.TCP,
                string_representation="From Home",
                from_port=FLOWER_PORT,
                to_port=FLOWER_PORT,
            ),
            description="From Home",
        )
        alb = elb.ApplicationLoadBalancer(
            self,
            "alb_airflow",
            internet_facing=True,
            security_group=sg_airflow_alb,
            vpc=vpc,
            load_balancer_name="alb-airflow",
        )
        # Listener 1: web UI.  open=False so CDK does not add an
        # allow-all ingress rule; access is restricted to MY_IP_CIDR.
        alb.add_listener(
            "alb_airflow_listener1",
            open=False,
            port=ALB_PORT,
            protocol=elb.ApplicationProtocol.HTTP,
            default_target_groups=[
                elb.ApplicationTargetGroup(
                    self,
                    "alb_airflow_target_group_web_server",
                    port=WEB_SERVER_PORT,
                    protocol=elb.ApplicationProtocol.HTTP,
                    target_group_name="alb-tg-airflow-web-server",
                    targets=[web_server_service],
                    vpc=vpc,
                )
            ],
        )
        # Listener 2: flower UI.
        alb.add_listener(
            "alb_airflow_listener2",
            open=False,
            port=FLOWER_PORT,
            protocol=elb.ApplicationProtocol.HTTP,
            default_target_groups=[
                elb.ApplicationTargetGroup(
                    self,
                    "alb_airflow_target_group_flower",
                    port=FLOWER_PORT,
                    protocol=elb.ApplicationProtocol.HTTP,
                    target_group_name="alb-tg-aiflow-flower",
                    targets=[flower_service],
                    vpc=vpc,
                )
            ],
        )
コード例 #20
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Run a Cromwell workflow server as a public ECS Fargate service.

        Creates a two-AZ VPC and an ECS cluster, references pre-existing
        IAM roles and an ECR image by ARN/name, defines a Fargate task
        running ``run_cromwell_server.sh``, opens the Cromwell port to
        the world, and outputs the resulting service ARN.

        NOTE(review): account-specific role ARNs are hard-coded below.
        """
        super().__init__(scope, id, **kwargs)

        # EC2 Vpc construct
        vpc = ec2.Vpc(
            self,
            id="cromwell_server_vpc",
            max_azs=2
        )

        # ECS Cluster construct
        cluster = ecs.Cluster(
            self,
            id="cromwell_cluster",
            vpc=vpc
        )

        # IAM roles (pre-existing, referenced by ARN)
        ecstaskexecutionrole = iam.Role.from_role_arn(
            self,
            "ecstaskexecutionrole",
            role_arn="arn:aws:iam::562965587442:role/ecsTaskExecutionRole"
        )

        # NOTE(review): currently unused; retained for the planned Batch
        # compute environment (see references at the bottom).
        batch_service_role = iam.Role.from_role_arn(
            self,
            "batchservicerole",
            role_arn="arn:aws:iam::562965587442:role/AWSBatchServiceRole"
        )

        fargate_cromwell_role = iam.Role.from_role_arn(
            self,
            "fargate_cromwell_role",
            role_arn="arn:aws:iam::562965587442:role/fargate_cromwell_role"
        )

        # Cromwell docker image from ECR
        container_img = ecr.Repository.from_repository_name(
            self,
            "cromwell_docker_image",
            repository_name=CROMWELL_REPOSITORY_NAME
        )

        # ECS task definition construct
        task_def = ecs.TaskDefinition(
            self,
            "cromwell_server_task",
            execution_role=ecstaskexecutionrole,
            task_role=fargate_cromwell_role,
            compatibility=ecs.Compatibility.FARGATE,
            cpu="1024",
            memory_mib="4096"
        )

        # ECS container definition construct
        container_def = ecs.ContainerDefinition(
            self,
            "cromwell_container",
            task_definition=task_def,
            image=ecs.ContainerImage.from_ecr_repository(
                repository=container_img,
                tag=CROMWELL_IMAGE_TAG
            ),
            command=["bash", "run_cromwell_server.sh"],
            cpu=1,
            health_check=None,
            working_directory='/',
            logging=ecs.LogDriver.aws_logs(
                stream_prefix="cromwell_logs",
                datetime_format=None,
                log_group=None,
                log_retention=None,
                multiline_pattern=None
            )
        )
        container_def.add_port_mappings(
            ecs.PortMapping(
                container_port=CROMWELL_PORT_NUMBER,
                host_port=CROMWELL_PORT_NUMBER,
                protocol=ecs.Protocol.TCP
            )
        )

        # EC2 Security Group construct
        security_group = ec2.SecurityGroup(
            self,
            "cromwell_server_security_group",
            vpc=vpc,
            allow_all_outbound=True,
            security_group_name="cromwell_server_security_group",
            description="This is the security group assigned to the cromwell server running as a Fargate service.",
        )
        # Open the Cromwell port to any IPv4 address.
        # fix: ec2.Port takes ec2.Protocol, not ecs.Protocol (both enums
        # happen to stringify to "tcp", but the types differ).
        security_group.add_ingress_rule(
            peer=ec2.Peer.any_ipv4(),
            connection=ec2.Port(protocol=ec2.Protocol.TCP,
                                from_port=CROMWELL_PORT_NUMBER,
                                to_port=CROMWELL_PORT_NUMBER,
                                string_representation="cromwell_server_port")
        )

        # ECS Fargate Service construct
        service = ecs.FargateService(
            self,
            "cromwell_service",
            task_definition=task_def,
            cluster=cluster,
            service_name="cromwell_server_service",
            assign_public_ip=True,
            desired_count=1,
            security_group=security_group
        )

        # Batch resources (compute environment / job queue) are not yet
        # provisioned here.  References for a future implementation:
        # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html
        # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-launchtemplate-launchtemplatedata.html

        core.CfnOutput(
            self,
            "FargateCromwellServiceArn",
            value=service.service_arn
        )
コード例 #21
0
ファイル: app.py プロジェクト: isimahei/ecsdemo-platform
    def appmesh(self) -> None:
        """Create an AWS App Mesh control plane and an Envoy virtual-gateway
        proxy service running on ECS Fargate behind a public NLB.

        Builds, in order: the mesh, a virtual gateway listening on port
        3000, a Fargate task running the App Mesh Envoy image with a
        liveness health check, and a NetworkLoadBalancedFargateService
        registered in the cluster's Cloud Map namespace.

        NOTE(review): relies on ``self.ecs_cluster`` (with a default
        Cloud Map namespace) being set elsewhere on this object — confirm
        against the enclosing class.
        """

        # This will create the app mesh (control plane)
        self.mesh = aws_appmesh.Mesh(self,
                                     "EcsWorkShop-AppMesh",
                                     mesh_name="ecs-mesh")

        # We will create a App Mesh Virtual Gateway
        self.mesh_vgw = aws_appmesh.VirtualGateway(
            self,
            "Mesh-VGW",
            mesh=self.mesh,
            listeners=[aws_appmesh.VirtualGatewayListener.http(port=3000)],
            virtual_gateway_name="ecsworkshop-vgw")

        # Creating the mesh gateway task for the frontend app
        # For more info related to App Mesh Proxy check https://docs.aws.amazon.com/app-mesh/latest/userguide/getting-started-ecs.html
        self.mesh_gw_proxy_task_def = aws_ecs.FargateTaskDefinition(
            self,
            "mesh-gw-proxy-taskdef",
            cpu=256,
            memory_limit_mib=512,
            family="mesh-gw-proxy-taskdef",
        )

        # LogGroup for the App Mesh Proxy Task
        self.logGroup = aws_logs.LogGroup(
            self,
            "ecsworkshopMeshGateway",
            #log_group_name="ecsworkshop-mesh-gateway",
            retention=aws_logs.RetentionDays.ONE_WEEK)

        # App Mesh Virtual Gateway Envoy proxy Task definition
        # For a use specific ECR region, please check https://docs.aws.amazon.com/app-mesh/latest/userguide/envoy.html
        # Health check passes only once Envoy reports its server state as LIVE.
        container = self.mesh_gw_proxy_task_def.add_container(
            "mesh-gw-proxy-contdef",
            image=aws_ecs.ContainerImage.from_registry(
                "public.ecr.aws/appmesh/aws-appmesh-envoy:v1.18.3.0-prod"),
            container_name="envoy",
            memory_reservation_mib=256,
            environment={
                "REGION": getenv('AWS_DEFAULT_REGION'),
                "ENVOY_LOG_LEVEL": "info",
                "ENABLE_ENVOY_STATS_TAGS": "1",
                # "ENABLE_ENVOY_XRAY_TRACING": "1",
                "APPMESH_RESOURCE_ARN": self.mesh_vgw.virtual_gateway_arn
            },
            essential=True,
            logging=aws_ecs.LogDriver.aws_logs(stream_prefix='/mesh-gateway',
                                               log_group=self.logGroup),
            health_check=aws_ecs.HealthCheck(command=[
                "CMD-SHELL",
                "curl -s http://localhost:9901/server_info | grep state | grep -q LIVE"
            ], ))

        # Default port where frontend app is listening
        container.add_port_mappings(aws_ecs.PortMapping(container_port=3000))

        #ammmesh-xray-uncomment
        # xray_container = self.mesh_gw_proxy_task_def.add_container(
        #     "FrontendServiceXrayContdef",
        #     image=aws_ecs.ContainerImage.from_registry("amazon/aws-xray-daemon"),
        #     logging=aws_ecs.LogDriver.aws_logs(
        #         stream_prefix='/xray-container',
        #         log_group=self.logGroup
        #     ),
        #     essential=True,
        #     container_name="xray",
        #     memory_reservation_mib=256,
        #     user="******"
        # )

        # container.add_container_dependencies(aws_ecs.ContainerDependency(
        #       container=xray_container,
        #       condition=aws_ecs.ContainerDependencyCondition.START
        #   )
        # )
        #ammmesh-xray-uncomment

        # For environment variables check https://docs.aws.amazon.com/app-mesh/latest/userguide/envoy-config.html
        # Public NLB on port 80 fronting the Envoy proxy; registered in the
        # cluster's default Cloud Map namespace as 'mesh-gw-proxy'.
        self.mesh_gateway_proxy_fargate_service = aws_ecs_patterns.NetworkLoadBalancedFargateService(
            self,
            "MeshGW-Proxy-Fargate-Service",
            service_name='mesh-gw-proxy',
            cpu=256,
            memory_limit_mib=512,
            desired_count=1,
            listener_port=80,
            assign_public_ip=True,
            task_definition=self.mesh_gw_proxy_task_def,
            cluster=self.ecs_cluster,
            public_load_balancer=True,
            cloud_map_options=aws_ecs.CloudMapOptions(
                cloud_map_namespace=self.ecs_cluster.
                default_cloud_map_namespace,
                name='mesh-gw-proxy'))

        # For testing purposes we will open any ipv4 requests to port 3000
        self.mesh_gateway_proxy_fargate_service.service.connections.allow_from_any_ipv4(
            port_range=aws_ec2.Port(protocol=aws_ec2.Protocol.TCP,
                                    string_representation="vtw_proxy",
                                    from_port=3000,
                                    to_port=3000),
            description="Allow NLB connections on port 3000")

        # Raise the open-file ulimit for the Envoy container (high
        # connection counts need more file descriptors).
        self.mesh_gw_proxy_task_def.default_container.add_ulimits(
            aws_ecs.Ulimit(hard_limit=15000,
                           name=aws_ecs.UlimitName.NOFILE,
                           soft_limit=15000))

        #Adding necessary policies for Envoy proxy to communicate with required services
        self.mesh_gw_proxy_task_def.execution_role.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonEC2ContainerRegistryReadOnly"))
        self.mesh_gw_proxy_task_def.execution_role.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                "CloudWatchLogsFullAccess"))

        self.mesh_gw_proxy_task_def.task_role.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                "CloudWatchFullAccess"))
        # self.mesh_gw_proxy_task_def.task_role.add_managed_policy(aws_iam.ManagedPolicy.from_aws_managed_policy_name("AWSXRayDaemonWriteAccess"))
        self.mesh_gw_proxy_task_def.task_role.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                "AWSAppMeshEnvoyAccess"))

        self.mesh_gw_proxy_task_def.execution_role.add_to_policy(
            aws_iam.PolicyStatement(actions=['ec2:DescribeSubnets'],
                                    resources=['*']))

        core.CfnOutput(self,
                       "MeshGwNlbDns",
                       value=self.mesh_gateway_proxy_fargate_service.
                       load_balancer.load_balancer_dns_name,
                       export_name="MeshGwNlbDns")
        core.CfnOutput(self,
                       "MeshArn",
                       value=self.mesh.mesh_arn,
                       export_name="MeshArn")
        core.CfnOutput(self,
                       "MeshName",
                       value=self.mesh.mesh_name,
                       export_name="MeshName")
        core.CfnOutput(
            self,
            "MeshEnvoyServiceArn",
            value=self.mesh_gateway_proxy_fargate_service.service.service_arn,
            export_name="MeshEnvoyServiceArn")
        core.CfnOutput(self,
                       "MeshVGWArn",
                       value=self.mesh_vgw.virtual_gateway_arn,
                       export_name="MeshVGWArn")
        core.CfnOutput(self,
                       "MeshVGWName",
                       value=self.mesh_vgw.virtual_gateway_name,
                       export_name="MeshVGWName")
コード例 #22
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Provision a Fargate-based Dask cluster plus a SageMaker notebook.

        Creates a VPC, an ECS cluster with a private Cloud Map namespace
        ('local-dask'), a Dask scheduler service and a Dask worker service
        (each with its own task definition and service-discovery name), a
        shared security group opening the Dask ports, and a SageMaker
        notebook instance in the same VPC for interactive access.
        """
        super().__init__(scope, id, **kwargs)

        # The image is built and pushed from the local 'dockerstuff'
        # directory; ContainerImage.from_asset manages the ECR repository
        # itself, so no explicit aws_ecr.Repository is needed.
        # NOTE(review): from_asset expects build_args as a mapping of
        # --build-arg name/value pairs, not a docker CLI string — verify
        # this list form synthesizes as intended.
        dockercontainer = ecs.ContainerImage.from_asset(
            directory='dockerstuff', build_args=['-t dask .'])

        # Networking — default would be all AZs in the region; cap at 3.
        vpc = ec2.Vpc(self, 'MyVpc', max_azs=3)
        subnets = vpc.private_subnets

        # Separate log groups for the scheduler and the workers.
        s_logs = logs.LogGroup(self, 'SlogGroup', log_group_name='SlogGroup')
        w_logs = logs.LogGroup(self, 'WlogGroup', log_group_name='WlogGroup')

        # Shared task/execution role: pull from ECR, write logs, and reach
        # SageMaker/S3 from inside the containers.
        nRole = iam_.Role(self,
                          'ECSExecutionRole',
                          assumed_by=iam_.ServicePrincipal('ecs-tasks'))

        iam_.Policy(
            self,
            "ECSExecutionPolicy",
            policy_name="ECSExecutionPolicy",
            statements=[
                iam_.PolicyStatement(actions=[
                    'ecr:BatchCheckLayerAvailability',
                    'ecr:GetDownloadUrlForLayer', 'ecr:BatchGetImage',
                    'ecr:GetAuthorizationToken', 'logs:CreateLogStream',
                    'logs:PutLogEvents', 'sagemaker:*', 's3:*'
                ],
                                     resources=[
                                         '*',
                                     ]),
            ]).attach_to_role(nRole)

        # ECS cluster with a private DNS namespace used by both services for
        # discovery (workers find the scheduler at
        # dask-scheduler.local-dask).
        cluster = ecs.Cluster(self,
                              'DaskCluster',
                              vpc=vpc,
                              cluster_name='Fargate-Dask-Cluster')
        cluster.add_default_cloud_map_namespace(
            name='local-dask', type=sd.NamespaceType.DNS_PRIVATE, vpc=vpc)

        # -------------------- Scheduler task --------------------
        schedulerTask = ecs.TaskDefinition(
            self,
            'taskDefinitionScheduler',
            compatibility=ecs.Compatibility.FARGATE,
            cpu='4096',
            memory_mib='8192',
            network_mode=ecs.NetworkMode.AWS_VPC,
            placement_constraints=None,
            execution_role=nRole,
            family='Dask-Scheduler',
            task_role=nRole)

        schedulerTask.add_container('MySchedulerImage',
                                    image=dockercontainer,
                                    command=['dask-scheduler'],
                                    cpu=4096,
                                    essential=True,
                                    logging=ecs.LogDriver.aws_logs(
                                        stream_prefix='ecs', log_group=s_logs),
                                    memory_limit_mib=8192,
                                    memory_reservation_mib=8192)

        # -------------------- Worker task --------------------
        workerTask = ecs.TaskDefinition(
            self,
            'taskDefinitionWorker',
            compatibility=ecs.Compatibility.FARGATE,
            cpu='4096',
            memory_mib='8192',
            network_mode=ecs.NetworkMode.AWS_VPC,
            placement_constraints=None,
            execution_role=nRole,
            family='Dask-Worker',
            task_role=nRole)

        workerTask.add_container(
            'MyWorkerImage',
            image=dockercontainer,
            command=[
                'dask-worker', 'dask-scheduler.local-dask:8786',
                '--memory-limit 1800MB', '--worker-port 9000',
                '--nanny-port 9001', '--bokeh-port 9002'
            ],
            cpu=4096,
            essential=True,
            # BUG FIX: worker containers now log to their own group
            # (w_logs); previously they logged to the scheduler group and
            # WlogGroup was left unused.
            logging=ecs.LogDriver.aws_logs(stream_prefix='ecs',
                                           log_group=w_logs),
            memory_limit_mib=8192,
            memory_reservation_mib=8192)

        # Security group shared by both services and the notebook:
        # 8786-8789 (scheduler/dashboard) and 9000-9002 (worker/nanny/bokeh).
        sg = ec2.SecurityGroup(self,
                               'MySG',
                               vpc=vpc,
                               description='Enable Scheduler ports access',
                               security_group_name='DaskSecurityGroup')

        # Peer.ipv4 is a static factory — no Peer() instantiation needed.
        # TODO(review): restrict ingress to the SG itself instead of
        # 0.0.0.0/0.
        sg.add_ingress_rule(peer=ec2.Peer.ipv4('0.0.0.0/0'),
                            connection=ec2.Port(protocol=ec2.Protocol.TCP,
                                                string_representation='p1',
                                                from_port=8786,
                                                to_port=8789))

        sg.add_ingress_rule(peer=ec2.Peer.ipv4('0.0.0.0/0'),
                            connection=ec2.Port(protocol=ec2.Protocol.TCP,
                                                string_representation='p2',
                                                from_port=9000,
                                                to_port=9002))

        # ----------------- Scheduler service -----------------
        # Registered in the default cluster namespace as 'Dask-Scheduler'.
        cmap1 = ecs.CloudMapOptions(dns_ttl=core.Duration.seconds(60),
                                    failure_threshold=10,
                                    name='Dask-Scheduler')

        schedulerService = ecs.FargateService(
            self,
            'DaskSchedulerService',
            task_definition=schedulerTask,
            assign_public_ip=True,
            security_group=sg,
            cluster=cluster,
            desired_count=1,
            max_healthy_percent=200,
            min_healthy_percent=100,
            service_name='Dask-Scheduler',
            cloud_map_options=cmap1)

        # ----------------- Worker service -----------------
        # Registered in the default cluster namespace as 'Dask-Worker'.
        cmap2 = ecs.CloudMapOptions(dns_ttl=core.Duration.seconds(60),
                                    failure_threshold=10,
                                    name='Dask-Worker')

        workerService = ecs.FargateService(
            self,
            'DaskWorkerService',
            task_definition=workerTask,
            assign_public_ip=True,
            security_group=sg,
            cluster=cluster,
            desired_count=1,
            max_healthy_percent=200,
            min_healthy_percent=100,
            service_name='Dask-Worker',
            cloud_map_options=cmap2)

        # ----------------- SageMaker notebook -----------------
        # Placed in the first private subnet with the Dask security group so
        # it can reach the scheduler directly.
        smRole = iam_.Role(self,
                           "notebookAccessRole",
                           assumed_by=iam_.ServicePrincipal('sagemaker'))

        iam_.Policy(self,
                    "notebookAccessPolicy",
                    policy_name="notebookAccessPolicy",
                    statements=[
                        iam_.PolicyStatement(actions=['s3:*', 'ecs:*'],
                                             resources=[
                                                 '*',
                                             ]),
                    ]).attach_to_role(smRole)

        sagemaker_.CfnNotebookInstance(
            self,
            'DaskNotebook',
            instance_type='ml.t2.medium',
            volume_size_in_gb=50,
            security_group_ids=[sg.security_group_id],
            subnet_id=subnets[0].subnet_id,
            notebook_instance_name='DaskNotebook',
            role_arn=smRole.role_arn,
            root_access='Enabled',
            direct_internet_access='Enabled',
            default_code_repository=
            'https://github.com/w601sxs/dask-examples.git')
コード例 #23
0
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 domain_name: str,
                 identity_provider_client_id: str,
                 identity_provider_client_secret: str,
                 identity_provider_client_url: str,
                 identity_provider_realm: str,
                 identity_provider_scope: str = 'openid',
                 vpc: ec2.IVpc = None,
                 cluster: ecs.ICluster = None,
                 load_balancer: elbv2.IApplicationLoadBalancer = None,
                 log_group: logs.ILogGroup = None,
                 **kwargs) -> None:
        """Deploy an HTTPS Fargate web app behind an OIDC-authenticating ALB.

        Builds (or reuses, when passed in) a VPC, ECS cluster, and log
        group; issues a DNS-validated ACM certificate for
        ``app.<domain_name>``; and fronts the containerized application
        with an ALB whose default action authenticates every request
        against a Keycloak-style OIDC provider before forwarding to the
        target group.

        Args:
            scope / id: standard CDK construct arguments.
            domain_name: apex domain with an existing Route53 hosted zone;
                the app is served at ``app.<domain_name>``.
            identity_provider_*: OIDC client settings; endpoints are
                derived as ``<url>/auth/realms/<realm>/protocol/openid-connect/...``.
            vpc / cluster / load_balancer / log_group: optional existing
                resources; created here when None.
        """
        super().__init__(scope, id, **kwargs)

        # Create any infrastructure pieces the caller did not supply.
        # NOTE(review): 'ApplicationkVpc' looks like a typo for
        # 'ApplicationVpc', but renaming a construct ID replaces the
        # deployed resource — leave as-is unless a redeploy is acceptable.
        if vpc is None:
            vpc = ec2.Vpc(self, 'ApplicationkVpc')

        if cluster is None:
            cluster = ecs.Cluster(self, 'ApplicationCluster', vpc=vpc)

        if log_group is None:
            log_group = logs.LogGroup(
                self,
                'ApplicationLogGroup',
                retention=logs.RetentionDays.ONE_WEEK,
                removal_policy=core.RemovalPolicy.DESTROY)

        # Task role the application container runs under (no policies
        # attached here; extend as the app needs AWS access).
        application_task_role = iam.Role(
            self,
            'ApplicationTaskRole',
            assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'))

        # Existing hosted zone looked up by domain name (requires env with
        # explicit account/region at synth time).
        application_hosted_zone = route53.HostedZone.from_lookup(
            self, 'ApplicationHostedZone', domain_name=domain_name)

        # DNS-validated certificate for the app subdomain.
        application_certificate = acm.DnsValidatedCertificate(
            self,
            'FrontendAlbCertificate',
            hosted_zone=application_hosted_zone,
            domain_name='app.' + domain_name)

        # ALB + Fargate service; image is built from the local
        # "application" directory and listens on 8080 inside the task.
        application_service = ecs_patterns.ApplicationLoadBalancedFargateService(
            self,
            'ApplicationLoadBalancedFargateService',
            cluster=cluster,
            load_balancer=load_balancer,
            task_image_options=ecs_patterns.
            ApplicationLoadBalancedTaskImageOptions(
                image=ecs.ContainerImage.from_asset("application"),
                enable_logging=True,
                log_driver=ecs.AwsLogDriver(stream_prefix='application',
                                            log_group=log_group),
                task_role=application_task_role,
                container_port=8080,
            ),
            memory_limit_mib=512,
            cpu=256,
            desired_count=1,
            public_load_balancer=True,
            domain_name='app.' + domain_name,
            domain_zone=application_hosted_zone,
            protocol=elbv2.ApplicationProtocol.HTTPS,
        )

        # Sticky sessions for 24h so authenticated users stay on one task.
        application_service.target_group.enable_cookie_stickiness(
            core.Duration.seconds(24 * 60 * 60))
        # Lenient health check (10 failures allowed) against the app root.
        application_service.target_group.configure_health_check(
            port='8080',
            path='/',
            timeout=core.Duration.seconds(20),
            healthy_threshold_count=2,
            unhealthy_threshold_count=10,
            interval=core.Duration.seconds(30),
        )

        application_service.listener.add_certificates(
            'ApplicationServiceCertificate',
            certificates=[application_certificate])

        # Replace the default forward action: authenticate every request
        # via OIDC first, then forward to the service's target group.
        # Endpoint URLs follow the Keycloak path layout.
        application_service.listener.add_action(
            'DefaultAction',
            action=elbv2.ListenerAction.authenticate_oidc(
                authorization_endpoint=identity_provider_client_url +
                '/auth/realms/' + identity_provider_realm +
                '/protocol/openid-connect/auth',
                token_endpoint=identity_provider_client_url + '/auth/realms/' +
                identity_provider_realm + '/protocol/openid-connect/token',
                user_info_endpoint=identity_provider_client_url +
                '/auth/realms/' + identity_provider_realm +
                '/protocol/openid-connect/userinfo',
                issuer=identity_provider_client_url + '/auth/realms/' +
                identity_provider_realm,
                client_id=identity_provider_client_id,
                client_secret=core.SecretValue(
                    identity_provider_client_secret),
                scope=identity_provider_scope,
                on_unauthenticated_request=elbv2.UnauthenticatedAction.
                AUTHENTICATE,
                next=elbv2.ListenerAction.forward(
                    [application_service.target_group]),
            ))

        # The ALB must reach the IdP over HTTPS to exchange/verify tokens.
        application_service.load_balancer.connections.allow_to_any_ipv4(
            port_range=ec2.Port(
                from_port=443,
                to_port=443,
                protocol=ec2.Protocol.TCP,
                string_representation='Allow ALB to verify token'))
コード例 #24
0
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        cpu: Union[int, float] = 256,
        memory: Union[int, float] = 512,
        mincount: int = 1,
        maxcount: int = 50,
        permissions: Optional[List[iam.PolicyStatement]] = None,
        code_dir: str = "./titiler/",
        **kwargs: Any,
    ) -> None:
        """Define stack.

        Deploys titiler as an ALB-fronted Fargate service: a 2-AZ VPC, an
        ECS cluster, the app image built from *code_dir*, request-based
        autoscaling between *mincount* and *maxcount* tasks, and any extra
        IAM *permissions* attached to the task role.
        """
        super().__init__(scope, id, **kwargs)

        permissions = permissions or []

        vpc = ec2.Vpc(self, f"{id}-vpc", max_azs=2)

        cluster = ecs.Cluster(self, f"{id}-cluster", vpc=vpc)

        # Uvicorn/Gunicorn entrypoint configuration for the container.
        task_env = DEFAULT_ENV.copy()
        task_env.update(
            dict(
                MODULE_NAME="titiler.main",
                VARIABLE_NAME="app",
                LOG_LEVEL="error",
            ))

        # GUNICORN configuration — only forwarded when explicitly set.
        if settings.workers_per_core:
            task_env.update(
                {"WORKERS_PER_CORE": str(settings.workers_per_core)})
        if settings.max_workers:
            task_env.update({"MAX_WORKERS": str(settings.max_workers)})
        if settings.web_concurrency:
            task_env.update({"WEB_CONCURRENCY": str(settings.web_concurrency)})

        fargate_service = ecs_patterns.ApplicationLoadBalancedFargateService(
            self,
            f"{id}-service",
            cluster=cluster,
            cpu=cpu,
            memory_limit_mib=memory,
            desired_count=mincount,
            public_load_balancer=True,
            listener_port=80,
            task_image_options=dict(
                image=ecs.ContainerImage.from_asset(
                    code_dir,
                    exclude=["cdk.out", ".git"],
                    file="Dockerfiles/ecs/Dockerfile",
                ),
                container_port=80,
                environment=task_env,
            ),
        )

        # Extra IAM statements (e.g. S3 bucket read) for the app.
        for perm in permissions:
            fargate_service.task_definition.task_role.add_to_policy(perm)

        scalable_target = fargate_service.service.auto_scale_task_count(
            min_capacity=mincount, max_capacity=maxcount)

        # https://github.com/awslabs/aws-rails-provisioner/blob/263782a4250ca1820082bfb059b163a0f2130d02/lib/aws-rails-provisioner/scaling.rb#L343-L387
        scalable_target.scale_on_request_count(
            "RequestScaling",
            requests_per_target=50,
            scale_in_cooldown=core.Duration.seconds(240),
            scale_out_cooldown=core.Duration.seconds(30),
            target_group=fargate_service.target_group,
        )

        # scalable_target.scale_on_cpu_utilization(
        #     "CpuScaling", target_utilization_percent=70,
        # )

        # BUG FIX: the original rule used Protocol.ALL, which CloudFormation
        # renders as IpProtocol -1 and therefore IGNORES the port range,
        # opening every port to any IPv4 address. Restrict to TCP port 80
        # as the rule's own description intends.
        fargate_service.service.connections.allow_from_any_ipv4(
            port_range=ec2.Port(
                protocol=ec2.Protocol.TCP,
                string_representation="All port 80",
                from_port=80,
                to_port=80,
            ),
            description="Allows traffic on port 80 from NLB",
        )
コード例 #25
0
#docker_volume=ecs.Volume(name='docker_vol',docker_volume_configuration=docker_vol_config)

#efs_mount=ecs.MountPoint(container_path='/efs',read_only=True, source_volume='docker_vol')

# ECS cluster for the VISTA project; 'stack' and 'vpc' are defined earlier
# in this file (not visible here).
cluster = ecs.Cluster(stack,
                      "wes-ecs",
                      vpc=vpc,
                      cluster_name='Proj-VONC_VISTA')
# Debug print: autoscaling_group is None for a cluster with no EC2 capacity
# added — presumably left over from the commented add_capacity experiment
# below; TODO confirm and remove.
print('cluster sec group ', str(type(cluster.autoscaling_group)))
#cluster.add_capacity("DefaultAutoScalingGroup",
#                     instance_type=ec2.InstanceType("c5.xlarge"), key_name='Vonc-Prod-Key',max_capacity=4,machine_image=amitouse,
#                     desired_capacity=2,min_capacity=2)

# Attach an existing application security group and allow SSH (TCP 22) from
# anywhere to the cluster's container instances.
print('connections ', str(cluster.connections))
port = ec2.Port(protocol=ec2.Protocol.TCP,
                string_representation='inbound to container instances',
                from_port=22,
                to_port=22)
cluster.connections.add_security_group(app_security_group_import)
cluster.connections.allow_from_any_ipv4(port,
                                        'in bound to container instances')

# Create a task definition with its own elastic network interface
# (Fargate tasks always use awsvpc networking). 4 vCPU / 8 GiB; the shared
# 'custom_policy' (defined elsewhere in the file) is granted to both the
# execution role and the task role.
task_definition_vistaweb = ecs.FargateTaskDefinition(
    stack,
    "Proj-VONC_VISTA-task-vistaweb",
    memory_limit_mib=8192,
    cpu=4096,
    family='Proj-VONC_VISTA-Vista')
task_definition_vistaweb.add_to_execution_role_policy(custom_policy)
task_definition_vistaweb.add_to_task_role_policy(custom_policy)
コード例 #26
0
ファイル: vpc.py プロジェクト: dominodatalab/cdk-cf-eks
    def provision_bastion(self, name: str, bastion: VPC.Bastion):
        """Provision an SSH bastion host in the public subnet.

        Does nothing when the bastion is disabled. Uses the AMI from the
        config when one is given; otherwise looks up the latest Ubuntu
        18.04 image (which requires an explicit numeric AWS account id).

        Args:
            name: prefix for the bastion security group name.
            bastion: bastion section of the VPC config.

        Returns:
            The bastion's security group, or None when disabled.
            (The original was annotated ``-> None`` despite returning the
            security group — annotation removed to match behavior.)
        """
        if not bastion.enabled:
            return None
        if bastion.machine_image:
            # Explicit AMI (with custom user data) supplied in the config.
            bastion_machine_image = ec2.MachineImage.generic_linux(
                {self.region: bastion.machine_image.ami_id},
                user_data=ec2.UserData.custom(bastion.machine_image.user_data),
            )
        else:
            if not self.scope.account.isnumeric(
            ):  # TODO: Can we get rid of this requirement?
                raise ValueError(
                    "Error looking up AMI: Must provide explicit AWS account ID to do AMI lookup. Either provide AMI ID or AWS account id"
                )

            # 099720109477 is Canonical's AWS account; pick the newest
            # Bionic amd64 server image.
            bastion_machine_image = ec2.LookupMachineImage(
                name="ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*",
                owners=["099720109477"])

        bastion_sg = ec2.SecurityGroup(
            self.scope,
            "bastion_sg",
            vpc=self.vpc,
            security_group_name=f"{name}-bastion",
        )

        # One ingress rule per configured (port rule, CIDR) pair.
        for rule in bastion.ingress_ports:
            for ip_cidr in rule.ip_cidrs:
                bastion_sg.add_ingress_rule(
                    peer=ec2.Peer.ipv4(ip_cidr),
                    connection=ec2.Port(
                        protocol=ec2.Protocol(rule.protocol),
                        string_representation=rule.name,
                        from_port=rule.from_port,
                        to_port=rule.to_port,
                    ),
                )

        # Distinct name so the instance no longer shadows the 'bastion'
        # config parameter (the original reassigned it).
        bastion_instance = ec2.Instance(
            self.scope,
            "bastion",
            machine_image=bastion_machine_image,
            vpc=self.vpc,
            instance_type=ec2.InstanceType(bastion.instance_type),
            key_name=bastion.key_name,
            security_group=bastion_sg,
            vpc_subnets=ec2.SubnetSelection(
                subnet_group_name=self.public_subnet_name, ),
        )

        # Elastic IP so the bastion's public address survives stop/start.
        ec2.CfnEIP(
            self.scope,
            "bastion_eip",
            instance_id=bastion_instance.instance_id,
        )

        cdk.CfnOutput(self.scope,
                      "bastion_public_ip",
                      value=bastion_instance.instance_public_ip)

        return bastion_sg
コード例 #27
0
ファイル: emr.py プロジェクト: dr-natetorious/aws-emr-hive
    def __init__(self, scope: core.Construct, id: str,
                 landing_zone: ILandingZone,
                 directory: DirectoryServicesConstruct, group_names: [List],
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        self.__landing_zone = landing_zone

        # Configure the security groups
        self.security_group = ec2.SecurityGroup(
            self,
            'SecurityGroup',
            vpc=landing_zone.networking.vpc,
            allow_all_outbound=True,
            description='HadoopConstruct Security Group',
            security_group_name='hadoop-mapreduce-group')

        for port in services.keys():
            self.security_group.add_ingress_rule(
                peer=ec2.Peer.any_ipv4(),
                connection=ec2.Port(protocol=ec2.Protocol.TCP,
                                    from_port=port,
                                    to_port=port,
                                    string_representation=services[port]))

        self.security_group.add_ingress_rule(
            peer=ec2.Peer.any_ipv4(),
            connection=ec2.Port(protocol=ec2.Protocol.UDP,
                                from_port=0,
                                to_port=65535,
                                string_representation='Allow All UDP Traffic'))

        self.security_group.add_ingress_rule(
            peer=ec2.Peer.any_ipv4(),
            connection=ec2.Port(protocol=ec2.Protocol.TCP,
                                from_port=0,
                                to_port=65535,
                                string_representation='Allow All TCP Traffic'))

        # Setup roles...
        self.jobFlowRole = iam.Role(
            self,
            'JobFlowRole',
            assumed_by=iam.ServicePrincipal(service='ec2.amazonaws.com'),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    'AmazonSSMManagedInstanceCore'),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    'service-role/AmazonElasticMapReduceforEC2Role'),
            ])

        profile_name = 'jobflowprofile@{}-{}'.format(
            landing_zone.zone_name,
            core.Stack.of(self).region)
        job_flow_instance_profile = iam.CfnInstanceProfile(
            self,
            'JobFlowInstanceProfile',
            instance_profile_name=profile_name,
            roles=[self.jobFlowRole.role_name])

        serviceRole = iam.Role(
            self,
            'ServiceRole',
            assumed_by=iam.ServicePrincipal(
                service='elasticmapreduce.amazonaws.com'),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    'service-role/AmazonElasticMapReduceRole')
            ])

        self.database = g.Database(self,
                                   'GlueStore',
                                   database_name='demo-database')

        self.bucket = s3.Bucket(self,
                                'LogBucket',
                                removal_policy=core.RemovalPolicy.DESTROY)

        emr_fs = EmrfsConstruct(self,
                                'Emrfs',
                                landing_zone=landing_zone,
                                directory=directory,
                                group_names=group_names,
                                job_flow_role=self.jobFlowRole)

        # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticmapreduce-instancefleetconfig.html
        self.cluster = emr.CfnCluster(
            self,
            'Hadoop',
            name='HadoopCluster',
            job_flow_role=profile_name,  #'EMR_EC2_DefaultRole',
            service_role=serviceRole.role_name,
            log_uri='s3://' + self.bucket.bucket_name + '/logs',
            release_label='emr-6.2.0',
            applications=[
                emr.CfnCluster.ApplicationProperty(name='Spark'),
                emr.CfnCluster.ApplicationProperty(name='Presto'),
                emr.CfnCluster.ApplicationProperty(name='Hue'),
                emr.CfnCluster.ApplicationProperty(name='Hive'),
                emr.CfnCluster.ApplicationProperty(name='JupyterHub'),
            ],
            configurations=[
                emr.CfnCluster.ConfigurationProperty(
                    classification='spark-hive-site',
                    configuration_properties={
                        'hive.metastore.client.factory.class':
                        'com.amazonaws.glue.catalog.metastore.AWSGlueDataCatalogHiveClientFactory'
                    }),
                emr.CfnCluster.ConfigurationProperty(
                    classification='hive-site',
                    configuration_properties={
                        'hive.metastore.client.factory.class':
                        'com.amazonaws.glue.catalog.metastore.AWSGlueDataCatalogHiveClientFactory',
                        'aws.glue.partition.num.segments':
                        '10',  #1 to 10; (default=5)
                        'hive.metastore.schema.verification': 'false',
                    })
            ],
            security_configuration=emr_fs.security_configuration.ref,
            # kerberos_attributes= emr.CfnCluster.KerberosAttributesProperty(
            #   kdc_admin_password=directory.password,
            #   realm= directory.mad.name.upper(),
            #   ad_domain_join_password=directory.password,
            #   ad_domain_join_user= directory.admin
            # ),
            managed_scaling_policy=emr.CfnCluster.ManagedScalingPolicyProperty(
                compute_limits=emr.CfnCluster.ComputeLimitsProperty(
                    minimum_capacity_units=1,
                    maximum_capacity_units=25,
                    unit_type='InstanceFleetUnits')),
            instances=emr.CfnCluster.JobFlowInstancesConfigProperty(
                #hadoop_version='2.4.0',
                termination_protected=False,
                master_instance_fleet=emr.CfnCluster.
                InstanceFleetConfigProperty(
                    target_spot_capacity=1,
                    instance_type_configs=[
                        emr.CfnCluster.InstanceTypeConfigProperty(
                            instance_type='m5.xlarge', )
                    ]),
                core_instance_fleet=emr.CfnCluster.InstanceFleetConfigProperty(
                    target_spot_capacity=1,
                    instance_type_configs=[
                        emr.CfnCluster.InstanceTypeConfigProperty(
                            instance_type='m5.xlarge',
                            ebs_configuration=emr.CfnCluster.
                            EbsConfigurationProperty(ebs_block_device_configs=[
                                emr.CfnCluster.EbsBlockDeviceConfigProperty(
                                    volume_specification=emr.CfnCluster.
                                    VolumeSpecificationProperty(
                                        size_in_gb=50, volume_type='gp2'))
                            ]))
                    ]),
                additional_master_security_groups=[
                    self.security_group.security_group_id
                ],
                additional_slave_security_groups=[
                    self.security_group.security_group_id
                ],
                ec2_subnet_ids=[
                    net.subnet_id for net in landing_zone.networking.vpc.
                    _select_subnet_objects(subnet_group_name='Hadoop')
                ],
            ))

        self.cluster.add_depends_on(job_flow_instance_profile)
コード例 #28
0
    def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc,
                 region: str) -> None:
        """Provision a Squid outbound-proxy fleet: one single-instance ASG per AZ.

        Each ASG runs a t3.nano in a public subnet. Instances are bootstrapped
        from config files staged in an S3 bucket, get a launch lifecycle hook
        (SNS-backed), and are tagged with the route table IDs of the
        private/isolated subnets in their AZ so an external Lambda can repoint
        routes on failover.

        :param scope: parent construct.
        :param id: construct id.
        :param vpc: VPC to deploy into; must contain public subnets.
        :param region: currently unused; kept for interface compatibility.
        :raises ValueError: if the VPC has no public subnets.
        """
        super().__init__(scope, id)

        # IAM role attached to the Squid instances (CloudWatch agent + SSM).
        squid_iam_role = iam.Role(
            self,
            "squid-role",
            assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "CloudWatchAgentServerPolicy"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AmazonEC2RoleforSSM")
            ])

        # Allow the instances to modify their own EC2 attributes.
        # NOTE(review): resources '*' is broad — presumably fine for this
        # demo; confirm before production use.
        squid_iam_role.add_to_policy(
            statement=iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                          actions=[
                                              'ec2:ModifyInstanceAttribute',
                                          ],
                                          resources=['*']))

        # Bucket holding the Squid config and whitelist files.
        squid_config_bucket = s3.Bucket(
            self, "squid-config", encryption=s3.BucketEncryption.KMS_MANAGED)

        # Upload config and whitelist files to the S3 bucket.
        s3_deployment.BucketDeployment(
            self,
            "config",
            destination_bucket=squid_config_bucket,
            sources=[
                s3_deployment.Source.asset(
                    path='./squid_app/squid_config_files/config_files_s3')
            ])

        # Instances read configs from, and write state back to, the bucket.
        squid_config_bucket.grant_read_write(identity=squid_iam_role)

        # Latest Amazon Linux 2 AMI.
        amazon_linux_2_ami = ec2.MachineImage.latest_amazon_linux(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=ec2.AmazonLinuxEdition.STANDARD,
            virtualization=ec2.AmazonLinuxVirt.HVM,
            storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE)

        if not vpc.public_subnets:
            raise ValueError("No public subnets in VPC")

        # The user-data template is identical for every AZ; read it once
        # instead of once per loop iteration — only the substitution
        # mappings vary per ASG.
        with open(
                "./squid_app/squid_config_files/user_data/squid_user_data.sh",
                'r') as user_data_h:
            user_data_template = user_data_h.read()

        # Squid ASGs: desired capacity of 1 instance in each of the AZs.
        self.squid_asgs = []
        for count, az in enumerate(vpc.availability_zones, start=1):
            asg = autoscaling.AutoScalingGroup(
                self,
                f"asg-{count}",
                vpc=vpc,
                instance_type=ec2.InstanceType("t3.nano"),
                desired_capacity=1,
                max_capacity=1,
                min_capacity=1,
                machine_image=amazon_linux_2_ami,
                role=squid_iam_role,
                vpc_subnets=ec2.SubnetSelection(
                    availability_zones=[az],
                    one_per_az=True,
                    subnet_type=ec2.SubnetType.PUBLIC),
                health_check=autoscaling.HealthCheck.ec2(
                    grace=core.Duration.minutes(5)),
                resource_signal_count=1,
                resource_signal_timeout=core.Duration.minutes(10))

            # The raw CFN logical ID is needed inside the user-data script.
            cfn_asg: autoscaling.CfnAutoScalingGroup = asg.node.default_child
            asg_logical_id = cfn_asg.logical_id

            # Required parameters substituted into the user-data script.
            user_data_mappings = {
                "__S3BUCKET__": squid_config_bucket.bucket_name,
                "__ASG__": asg_logical_id,
                "__CW_ASG__": "${aws:AutoScalingGroupName}"
            }
            user_data_sub = core.Fn.sub(user_data_template,
                                        user_data_mappings)

            # Add the rendered user data to the ASG's launch config.
            asg.add_user_data(user_data_sub)

            # Security group attached to the ASG Squid instances.
            # Outbound: all allowed. Inbound: VPC CIDR on ports 80 and 443.
            asg.connections.allow_from(
                other=ec2.Peer.ipv4(vpc.vpc_cidr_block),
                port_range=ec2.Port(protocol=ec2.Protocol.TCP,
                                    string_representation="HTTP from VPC",
                                    from_port=80,
                                    to_port=80))

            asg.connections.allow_from(
                other=ec2.Peer.ipv4(vpc.vpc_cidr_block),
                port_range=ec2.Port(protocol=ec2.Protocol.TCP,
                                    string_representation="HTTPS from VPC",
                                    from_port=443,
                                    to_port=443))

            # Lifecycle hook so route tables can be updated (by a Lambda)
            # when an instance launches and is marked healthy.
            autoscaling.LifecycleHook(
                self,
                f"asg-hook-{count}",
                auto_scaling_group=asg,
                lifecycle_transition=autoscaling.LifecycleTransition.
                INSTANCE_LAUNCHING,
                notification_target=hooktargets.TopicHook(
                    sns.Topic(self,
                              f"squid-asg-{count}-lifecycle-hook-topic",
                              display_name=
                              f"Squid ASG {count} Lifecycle Hook topic")),
                default_result=autoscaling.DefaultResult.ABANDON,
                heartbeat_timeout=core.Duration.minutes(5))

            # Tag the ASG with the route table IDs of the isolated and/or
            # private subnets in this AZ; the Squid Lambda reads this tag to
            # know which route tables to update when the alarm changes from
            # ALARM to OK.
            private_subnets_in_az = []
            isolated_subnets_in_az = []

            if vpc.private_subnets:
                private_subnets_in_az = vpc.select_subnets(
                    availability_zones=[az],
                    subnet_type=ec2.SubnetType.PRIVATE).subnets
            if vpc.isolated_subnets:
                isolated_subnets_in_az = vpc.select_subnets(
                    availability_zones=[az],
                    subnet_type=ec2.SubnetType.ISOLATED).subnets

            non_public_subnets_in_az = (isolated_subnets_in_az +
                                        private_subnets_in_az)

            # Comma-separated route table ID list (empty when the AZ has no
            # non-public subnets).
            route_table_ids = ",".join(
                subnet.route_table.route_table_id
                for subnet in non_public_subnets_in_az)

            # NOTE(review): core.Tag.add is deprecated in later CDK v1
            # releases in favor of core.Tags.of(asg).add(...) — confirm the
            # pinned CDK version before switching.
            core.Tag.add(asg,
                         key='RouteTableIds',
                         value=route_table_ids,
                         apply_to_launched_instances=False)

            self.squid_asgs.append(asg)
コード例 #29
0
    def __init__(self, scope: core.Stack, id: str, **kwargs) -> None:
        """Base platform for the ECS-on-Fargate + EFS demo.

        Imports the pre-existing VPC, service-discovery namespace and ECS
        cluster, then provisions the public ALB and target group, security
        groups, a shared EFS file system, the ECS task execution role and a
        log group, and exports the identifiers downstream stacks consume.
        """
        super().__init__(scope, id, **kwargs)
        self.environment_name = 'ecsworkshop'

        def tcp_port(label, port):
            # Helper: single-port TCP ec2.Port.
            return ec2.Port(protocol=ec2.Protocol.TCP,
                            string_representation=label,
                            from_port=port,
                            to_port=port)

        # -- Import the existing VPC and ECS cluster ----------------------
        self.vpc = ec2.Vpc.from_lookup(
            self, "VPC",
            vpc_name=f"{self.environment_name}-base/BaseVPC")

        ns_name = core.Fn.import_value('NSNAME')
        ns_arn = core.Fn.import_value('NSARN')
        ns_id = core.Fn.import_value('NSID')
        self.sd_namespace = sd.PrivateDnsNamespace.from_private_dns_namespace_attributes(
            self, "SDNamespace",
            namespace_name=ns_name,
            namespace_arn=ns_arn,
            namespace_id=ns_id)

        self.ecs_cluster = ecs.Cluster.from_cluster_attributes(
            self, "ECSCluster",
            cluster_name=core.Fn.import_value('ECSClusterName'),
            security_groups=[],
            vpc=self.vpc,
            default_cloud_map_namespace=self.sd_namespace)

        # -- Public load balancer for the ECS service ---------------------
        self.frontend_sec_grp = ec2.SecurityGroup(
            self, "FrontendIngress",
            vpc=self.vpc,
            allow_all_outbound=True,
            description="Frontend Ingress All port 80")

        self.load_balancer = elbv2.ApplicationLoadBalancer(
            self, "ALB",
            vpc=self.vpc,
            internet_facing=True,
            security_group=self.frontend_sec_grp)

        self.target_group = elbv2.ApplicationTargetGroup(
            self, "ALBTG",
            vpc=self.vpc,
            port=8000,
            target_type=elbv2.TargetType.IP,
            target_group_name="ECSDemoFargateEFS")

        self.load_balancer.add_listener(
            "FrontendListener",
            port=80,
            default_target_groups=[self.target_group])

        # -- Shared EFS file system and its security group ----------------
        self.service_sec_grp = ec2.SecurityGroup(
            self, "EFSSecGrp",
            vpc=self.vpc,
            description="Allow access to self on NFS Port")

        # NFS between members of the same security group.
        self.service_sec_grp.connections.allow_from(
            other=self.service_sec_grp,
            port_range=tcp_port("Self", 2049))

        # TODO: possibly create another sec grp for 8000
        self.service_sec_grp.connections.allow_from(
            other=self.frontend_sec_grp,
            port_range=tcp_port("LB2Service", 8000))

        self.shared_fs = efs.FileSystem(
            self, "SharedFS",
            vpc=self.vpc,
            security_group=self.service_sec_grp)

        ## TODO: IAM Role to access EFS access points for task ##

        # -- Task execution role ------------------------------------------
        exec_policy = iam.ManagedPolicy.from_managed_policy_arn(
            self, 'arn',
            managed_policy_arn=(
                'arn:aws:iam::aws:policy/service-role/'
                'AmazonECSTaskExecutionRolePolicy'))
        self.task_execution_role = iam.Role(
            self, "TaskExecutionRole",
            assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'),
            description="Task execution role for ecs services",
            managed_policies=[exec_policy])

        # -- Logging ------------------------------------------------------
        self.service_log_group = logs.LogGroup(self, "ECSEFSDemoLogGrp")

        # -- CloudFormation outputs consumed by the service stacks --------
        private_subnet_ids = ",".join(
            subnet.subnet_id for subnet in self.vpc.private_subnets)
        sec_grp_ids = (f"{self.frontend_sec_grp.security_group_id},"
                       f"{self.service_sec_grp.security_group_id}")

        for logical_id, value, export_name in (
                ("ExecutionRoleARN", self.task_execution_role.role_arn,
                 "ECSFargateEFSDemoTaskExecutionRoleARN"),
                ("EFSID", self.shared_fs.file_system_id,
                 "ECSFargateEFSDemoFSID"),
                ("LBName", self.load_balancer.load_balancer_name,
                 "ECSFargateEFSDemoLBName"),
                ("TargetGroupArn", self.target_group.target_group_arn,
                 "ECSFargateEFSDemoTGARN"),
                ("VPCPrivateSubnets", private_subnet_ids,
                 "ECSFargateEFSDemoPrivSubnets"),
                ("SecurityGroups", sec_grp_ids,
                 "ECSFargateEFSDemoSecGrps"),
                ("LBURL", self.load_balancer.load_balancer_dns_name,
                 "ECSFargateEFSDemoLBURL"),
                ("LogGroupName", self.service_log_group.log_group_name,
                 "ECSFargateEFSDemoLogGroupName")):
            core.CfnOutput(self, logical_id,
                           value=value, export_name=export_name)
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """EKS demo environment: cluster, ALB Ingress Controller and bastion.

        Creates a dedicated VPC and an EKS cluster with a managed node
        group, wires an IRSA service account with the (v1) ALB Ingress
        Controller IAM permissions and installs the controller via Helm,
        then launches a code-server bastion EC2 instance fronted by a
        public ALB forwarding port 80 -> 8080.
        """
        super().__init__(scope, id, **kwargs)

        eks_vpc = ec2.Vpc(self, "VPC", cidr="10.0.0.0/16")
        self.eks_vpc = eks_vpc

        # IAM role for the code-server bastion; also mapped as the EKS
        # masters role below.
        # NOTE(review): AdministratorAccess is very broad — acceptable for a
        # workshop bastion, not for production.
        bastion_role = iam.Role(
            self,
            "BastionRole",
            assumed_by=iam.CompositePrincipal(
                iam.ServicePrincipal("ec2.amazonaws.com"),
                iam.AccountRootPrincipal()),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AdministratorAccess")
            ])
        self.bastion_role = bastion_role
        # EC2 instance profile for that role (construct kept for its
        # side effect in the template).
        instance_profile = iam.CfnInstanceProfile(
            self, "InstanceProfile", roles=[bastion_role.role_name])

        # Security group for the control-plane ENIs; open to the VPC CIDR.
        eks_security_group = ec2.SecurityGroup(self,
                                               "EKSSecurityGroup",
                                               vpc=eks_vpc,
                                               allow_all_outbound=True)

        eks_security_group.add_ingress_rule(ec2.Peer.ipv4('10.0.0.0/16'),
                                            ec2.Port.all_traffic())

        # EKS cluster with a 2 x m5.large managed node group.
        eks_cluster = eks.Cluster(
            self,
            "cluster",
            cluster_name="cluster",
            vpc=eks_vpc,
            masters_role=bastion_role,
            default_capacity_type=eks.DefaultCapacityType.NODEGROUP,
            default_capacity_instance=ec2.InstanceType("m5.large"),
            default_capacity=2,
            security_group=eks_security_group,
            endpoint_access=eks.EndpointAccess.PUBLIC_AND_PRIVATE,
            version=eks.KubernetesVersion.V1_17)
        self.cluster_cert = eks_cluster.cluster_certificate_authority_data

        # Deploy ALB Ingress Controller.
        # Create the k8s service account and corresponding IAM role mapped
        # via IRSA.
        alb_service_account = eks_cluster.add_service_account(
            "alb-ingress-controller",
            name="alb-ingress-controller",
            namespace="kube-system")

        # IAM statements required by the (v1) ALB Ingress Controller.
        # Kept as raw policy JSON and attached in a single loop below
        # instead of ten numbered dict/add_to_policy pairs.
        alb_policy_statements = [
            {
                "Effect": "Allow",
                "Action": [
                    "acm:DescribeCertificate", "acm:ListCertificates",
                    "acm:GetCertificate"
                ],
                "Resource": "*"
            },
            {
                "Effect": "Allow",
                "Action": [
                    "ec2:AuthorizeSecurityGroupIngress",
                    "ec2:CreateSecurityGroup", "ec2:CreateTags",
                    "ec2:DeleteTags", "ec2:DeleteSecurityGroup",
                    "ec2:DescribeAccountAttributes", "ec2:DescribeAddresses",
                    "ec2:DescribeInstances", "ec2:DescribeInstanceStatus",
                    "ec2:DescribeInternetGateways",
                    "ec2:DescribeNetworkInterfaces",
                    "ec2:DescribeSecurityGroups", "ec2:DescribeSubnets",
                    "ec2:DescribeTags", "ec2:DescribeVpcs",
                    "ec2:ModifyInstanceAttribute",
                    "ec2:ModifyNetworkInterfaceAttribute",
                    "ec2:RevokeSecurityGroupIngress"
                ],
                "Resource": "*"
            },
            {
                "Effect": "Allow",
                "Action": [
                    "elasticloadbalancing:AddListenerCertificates",
                    "elasticloadbalancing:AddTags",
                    "elasticloadbalancing:CreateListener",
                    "elasticloadbalancing:CreateLoadBalancer",
                    "elasticloadbalancing:CreateRule",
                    "elasticloadbalancing:CreateTargetGroup",
                    "elasticloadbalancing:DeleteListener",
                    "elasticloadbalancing:DeleteLoadBalancer",
                    "elasticloadbalancing:DeleteRule",
                    "elasticloadbalancing:DeleteTargetGroup",
                    "elasticloadbalancing:DeregisterTargets",
                    "elasticloadbalancing:DescribeListenerCertificates",
                    "elasticloadbalancing:DescribeListeners",
                    "elasticloadbalancing:DescribeLoadBalancers",
                    "elasticloadbalancing:DescribeLoadBalancerAttributes",
                    "elasticloadbalancing:DescribeRules",
                    "elasticloadbalancing:DescribeSSLPolicies",
                    "elasticloadbalancing:DescribeTags",
                    "elasticloadbalancing:DescribeTargetGroups",
                    "elasticloadbalancing:DescribeTargetGroupAttributes",
                    "elasticloadbalancing:DescribeTargetHealth",
                    "elasticloadbalancing:ModifyListener",
                    "elasticloadbalancing:ModifyLoadBalancerAttributes",
                    "elasticloadbalancing:ModifyRule",
                    "elasticloadbalancing:ModifyTargetGroup",
                    "elasticloadbalancing:ModifyTargetGroupAttributes",
                    "elasticloadbalancing:RegisterTargets",
                    "elasticloadbalancing:RemoveListenerCertificates",
                    "elasticloadbalancing:RemoveTags",
                    "elasticloadbalancing:SetIpAddressType",
                    "elasticloadbalancing:SetSecurityGroups",
                    "elasticloadbalancing:SetSubnets",
                    "elasticloadbalancing:SetWebAcl"
                ],
                "Resource": "*"
            },
            {
                "Effect": "Allow",
                "Action": [
                    "iam:CreateServiceLinkedRole",
                    "iam:GetServerCertificate",
                    "iam:ListServerCertificates"
                ],
                "Resource": "*"
            },
            {
                "Effect": "Allow",
                "Action": ["cognito-idp:DescribeUserPoolClient"],
                "Resource": "*"
            },
            {
                "Effect": "Allow",
                "Action": [
                    "waf-regional:GetWebACLForResource",
                    "waf-regional:GetWebACL",
                    "waf-regional:AssociateWebACL",
                    "waf-regional:DisassociateWebACL"
                ],
                "Resource": "*"
            },
            {
                "Effect": "Allow",
                "Action": ["tag:GetResources", "tag:TagResources"],
                "Resource": "*"
            },
            {
                "Effect": "Allow",
                "Action": ["waf:GetWebACL"],
                "Resource": "*"
            },
            {
                "Effect": "Allow",
                "Action": [
                    "wafv2:GetWebACL", "wafv2:GetWebACLForResource",
                    "wafv2:AssociateWebACL", "wafv2:DisassociateWebACL"
                ],
                "Resource": "*"
            },
            {
                "Effect": "Allow",
                "Action": [
                    "shield:DescribeProtection",
                    "shield:GetSubscriptionState",
                    "shield:DeleteProtection", "shield:CreateProtection",
                    "shield:DescribeSubscription", "shield:ListProtections"
                ],
                "Resource": "*"
            },
        ]

        # Attach the necessary permissions to the IRSA role.
        for statement_json in alb_policy_statements:
            alb_service_account.add_to_policy(
                iam.PolicyStatement.from_json(statement_json))

        # Deploy the ALB Ingress Controller from the Helm chart, reusing
        # the service account created above.
        eks_cluster.add_helm_chart(
            "aws-alb-ingress-controller",
            chart="aws-alb-ingress-controller",
            repository=
            "http://storage.googleapis.com/kubernetes-charts-incubator",
            namespace="kube-system",
            values={
                "clusterName": "cluster",
                "awsRegion": os.environ["CDK_DEFAULT_REGION"],
                "awsVpcID": eks_vpc.vpc_id,
                "rbac": {
                    "create": True,
                    "serviceAccount": {
                        "create": False,
                        "name": "alb-ingress-controller"
                    }
                }
            })

        # Create code-server bastion.
        # Get latest Amazon Linux 2 AMI.
        amzn_linux = ec2.MachineImage.latest_amazon_linux(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=ec2.AmazonLinuxEdition.STANDARD,
            virtualization=ec2.AmazonLinuxVirt.HVM,
            storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE)

        # Security group for code-server; 8080 open to the world (the
        # instance sits in a public subnet).
        security_group = ec2.SecurityGroup(self,
                                           "SecurityGroup",
                                           vpc=eks_vpc,
                                           allow_all_outbound=True)

        security_group.add_ingress_rule(ec2.Peer.any_ipv4(),
                                        ec2.Port.tcp(8080))

        # EC2 instance running code-server.
        code_server_instance = ec2.Instance(
            self,
            "CodeServerInstance",
            instance_type=ec2.InstanceType("t3.large"),
            machine_image=amzn_linux,
            role=bastion_role,
            vpc=eks_vpc,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            security_group=security_group,
            block_devices=[
                ec2.BlockDevice(device_name="/dev/xvda",
                                volume=ec2.BlockDeviceVolume.ebs(20))
            ])

        # Bootstrap code-server plus the kubectl/helm/CDK tooling.
        # add_commands(*commands) appends the commands in order, so one call
        # is equivalent to the command-per-call form.
        code_server_instance.user_data.add_commands(
            "mkdir -p ~/.local/lib ~/.local/bin ~/.config/code-server",
            "curl -fL https://github.com/cdr/code-server/releases/download/v3.5.0/code-server-3.5.0-linux-amd64.tar.gz | tar -C ~/.local/lib -xz",
            "mv ~/.local/lib/code-server-3.5.0-linux-amd64 ~/.local/lib/code-server-3.5.0",
            "ln -s ~/.local/lib/code-server-3.5.0/bin/code-server ~/.local/bin/code-server",
            "echo \"bind-addr: 0.0.0.0:8080\" > ~/.config/code-server/config.yaml",
            "echo \"auth: password\" >> ~/.config/code-server/config.yaml",
            # Password defaults to the instance ID fetched from IMDS.
            "echo \"password: $(curl -s http://169.254.169.254/latest/meta-data/instance-id)\" >> ~/.config/code-server/config.yaml",
            "echo \"cert: false\" >> ~/.config/code-server/config.yaml",
            "~/.local/bin/code-server &",
            "yum -y install jq gettext bash-completion moreutils",
            "sudo pip install --upgrade awscli && hash -r",
            "echo 'export ALB_INGRESS_VERSION=\"v1.1.8\"' >>  ~/.bash_profile",
            "curl --silent --location -o /usr/local/bin/kubectl \"https://amazon-eks.s3.us-west-2.amazonaws.com/1.17.9/2020-08-04/bin/linux/amd64/kubectl\"",
            "chmod +x /usr/local/bin/kubectl",
            "curl -L https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash",
            "export ACCOUNT_ID=$(aws sts get-caller-identity --output text --query Account)",
            "export AWS_REGION=$(curl -s 169.254.169.254/latest/dynamic/instance-identity/document | jq -r '.region')",
            "echo \"export ACCOUNT_ID=${ACCOUNT_ID}\" | tee -a ~/.bash_profile",
            "echo \"export AWS_REGION=${AWS_REGION}\" | tee -a ~/.bash_profile",
            "aws configure set default.region ${AWS_REGION}",
            "curl --silent --location https://rpm.nodesource.com/setup_12.x | bash -",
            "yum -y install nodejs",
            "amazon-linux-extras enable python3",
            "yum install -y python3 --disablerepo amzn2-core",
            "yum install -y git",
            "rm /usr/bin/python && ln -s /usr/bin/python3 /usr/bin/python && ln -s /usr/bin/pip3 /usr/bin/pip",
            "npm install -g aws-cdk",
            "echo 'export KUBECONFIG=~/.kube/config' >>  ~/.bash_profile",
            "git clone https://github.com/jasonumiker/eks-school.git")

        # Public ALB forwarding :80 to code-server on :8080.
        lb = elbv2.ApplicationLoadBalancer(self,
                                           "LB",
                                           vpc=eks_vpc,
                                           internet_facing=True)
        listener = lb.add_listener("Listener", port=80)
        listener.connections.allow_default_port_from_any_ipv4(
            "Open to the Internet")
        listener.connections.allow_to_any_ipv4(
            port_range=ec2.Port(string_representation="TCP 8080",
                                protocol=ec2.Protocol.TCP,
                                from_port=8080,
                                to_port=8080))
        listener.add_targets(
            "Target",
            port=8080,
            targets=[
                elbv2.InstanceTarget(
                    instance_id=code_server_instance.instance_id, port=8080)
            ])