Example #1
    def __init__(self, scope: core.Construct, id: str, vpc_cidr: str,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        self._vpc = ec2.Vpc(
            self,
            id,
            cidr=vpc_cidr,
            enable_dns_hostnames=True,
            enable_dns_support=True,
            max_azs=2,
            subnet_configuration=[
                ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PUBLIC,
                                        name="BASTION",
                                        cidr_mask=24),
                ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PRIVATE,
                                        name="ECS",
                                        cidr_mask=24),
                ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PUBLIC,
                                        name="DBS",
                                        cidr_mask=24)
            ],
            nat_gateway_provider=ec2.NatProvider.gateway(),
            nat_gateway_subnets=ec2.SubnetSelection(
                one_per_az=True, subnet_group_name="BASTION"),
            gateway_endpoints={
                's3':
                ec2.GatewayVpcEndpointOptions(
                    service=ec2.GatewayVpcEndpointAwsService.S3,
                    subnets=[
                        ec2.SubnetSelection(one_per_az=True,
                                            subnet_type=ec2.SubnetType.PUBLIC)
                    ])
            })
    def create_endpoints(self) -> None:
        endpoints = {
            "ECS": ec2.InterfaceVpcEndpointAwsService.ECS,
            "ECR": ec2.InterfaceVpcEndpointAwsService.ECR,
            "ECR_DOCKER": ec2.InterfaceVpcEndpointAwsService.ECR_DOCKER,
            "CLOUDWATCH_LOGS":
            ec2.InterfaceVpcEndpointAwsService.CLOUDWATCH_LOGS,
            "SECRETS_MANAGER":
            ec2.InterfaceVpcEndpointAwsService.SECRETS_MANAGER,
        }

        for name, service in endpoints.items():
            ec2.InterfaceVpcEndpoint(
                self,
                name,
                vpc=self.instance,
                service=service,
                subnets=ec2.SubnetSelection(
                    subnet_type=ec2.SubnetType.ISOLATED),
                private_dns_enabled=True,
                security_groups=[self.vpc_endpoint_sg],
            )

        self.instance.add_gateway_endpoint(
            "s3-endpoint",
            service=ec2.GatewayVpcEndpointAwsService.S3,
            subnets=[ec2.SubnetSelection(subnet_type=ec2.SubnetType.ISOLATED)],
        )
    def __init__(self, scope: core.Construct, **kwargs) -> None:
        self.deploy_env = active_environment
        super().__init__(scope, id=f"{self.deploy_env.value}-common-stack", **kwargs)

        self.custom_vpc = ec2.Vpc(self, f"vpc-{self.deploy_env.value}")

        self.orders_rds_sg = ec2.SecurityGroup(
            self,
            f"orders-{self.deploy_env.value}-sg",
            vpc=self.custom_vpc,
            allow_all_outbound=True,
            security_group_name=f"orders-{self.deploy_env.value}-sg",
        )

        self.orders_rds_sg.add_ingress_rule(
            peer=ec2.Peer.ipv4("0.0.0.0/0"), connection=ec2.Port.tcp(5432)
        )

        for subnet in self.custom_vpc.private_subnets:
            self.orders_rds_sg.add_ingress_rule(
                peer=ec2.Peer.ipv4(subnet.ipv4_cidr_block), connection=ec2.Port.tcp(5432)
            )

        self.orders_rds_parameter_group = rds.ParameterGroup(
            self,
            f"orders-{self.deploy_env.value}-rds-parameter-group",
            description="Parameter group to allow CDC from RDS using DMS.",
            engine=rds.DatabaseInstanceEngine.postgres(
                version=rds.PostgresEngineVersion.VER_12_4
            ),
            parameters={"rds.logical_replication": "1", "wal_sender_timeout": "0"},
        )

        self.orders_rds = rds.DatabaseInstance(
            self,
            f"orders-{self.deploy_env.value}-rds",
            engine=rds.DatabaseInstanceEngine.postgres(
                version=rds.PostgresEngineVersion.VER_12_4
            ),
            database_name="orders",
            instance_type=ec2.InstanceType("t3.micro"),
            vpc=self.custom_vpc,
            instance_identifier=f"rds-{self.deploy_env.value}-orders-db",
            port=5432,
            vpc_placement=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            subnet_group=rds.SubnetGroup(
                self,
                f"rds-{self.deploy_env.value}-subnet",
                description="place RDS on public subnet",
                vpc=self.custom_vpc,
                vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            ),
            parameter_group=self.orders_rds_parameter_group,
            security_groups=[self.orders_rds_sg],
            removal_policy=core.RemovalPolicy.DESTROY,
            **kwargs,
        )
    def __init__(self, scope: core.Construct, id: str, eksname: str,
                 codebucket: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # //*************************************************//
        # //******************* NETWORK ********************//
        # //************************************************//
        # create VPC
        self._vpc = ec2.Vpc(self, 'eksVpc', max_azs=2)
        core.Tags.of(self._vpc).add('Name', eksname + 'EksVpc')

        self._log_bucket = s3.Bucket.from_bucket_name(self, 'vpc_logbucket',
                                                      codebucket)
        self._vpc.add_flow_log("FlowLogCloudWatch",
                               destination=ec2.FlowLogDestination.to_s3(
                                   self._log_bucket),
                               traffic_type=ec2.FlowLogTrafficType.REJECT)
        # VPC endpoint security group
        self._vpc_endpoint_sg = ec2.SecurityGroup(
            self,
            'EndpointSg',
            security_group_name='SparkOnEKS-VPCEndpointSg',
            vpc=self._vpc,
            description='Security Group for Endpoint',
        )
        self._vpc_endpoint_sg.add_ingress_rule(
            ec2.Peer.ipv4(self._vpc.vpc_cidr_block), ec2.Port.tcp(port=443))
        core.Tags.of(self._vpc_endpoint_sg).add('Name',
                                                'SparkOnEKS-VPCEndpointSg')

        # Add VPC endpoint
        self._vpc.add_gateway_endpoint(
            "S3GatewayEndpoint",
            service=ec2.GatewayVpcEndpointAwsService.S3,
            subnets=[
                ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
                ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE)
            ])

        self._vpc.add_interface_endpoint(
            "EcrDockerEndpoint",
            service=ec2.InterfaceVpcEndpointAwsService.ECR_DOCKER,
            security_groups=[self._vpc_endpoint_sg])
        self._vpc.add_interface_endpoint(
            "CWLogsEndpoint",
            service=ec2.InterfaceVpcEndpointAwsService.CLOUDWATCH_LOGS,
            security_groups=[self._vpc_endpoint_sg])
        self._vpc.add_interface_endpoint(
            "AthenaEndpoint",
            service=ec2.InterfaceVpcEndpointAwsService.ATHENA,
            security_groups=[self._vpc_endpoint_sg])
        self._vpc.add_interface_endpoint(
            "KMSEndpoint",
            service=ec2.InterfaceVpcEndpointAwsService.KMS,
            security_groups=[self._vpc_endpoint_sg])
Example #5
    def __init__(self, scope: core.Construct, id: str, props,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create the VPC
        # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/Vpc.html

        vpc = ec2.Vpc(
            self,
            'VPC',
            cidr='10.0.0.0/16',
            max_azs=2,
            subnet_configuration=[
                ec2.SubnetConfiguration(cidr_mask=24,
                                        name='Public',
                                        subnet_type=ec2.SubnetType.PUBLIC),
                ec2.SubnetConfiguration(cidr_mask=24,
                                        name='Private',
                                        subnet_type=ec2.SubnetType.PRIVATE)
            ])

        # Security group for the VPC endpoints
        endpoint_sg = ec2.SecurityGroup(self, 'EndpointSecurityGroup', vpc=vpc)

        # Create the S3 gateway VPC endpoint
        s3_endpoint = vpc.add_gateway_endpoint(
            id='S3Endpoint',
            service=ec2.GatewayVpcEndpointAwsService.S3,
            subnets=[ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE)])
        # Create the ECR Docker interface VPC endpoint
        ecr_docker_endpoint = vpc.add_interface_endpoint(
            id='EcrDockerEndpoint',
            service=ec2.InterfaceVpcEndpointAwsService.ECR_DOCKER,
            private_dns_enabled=True,
            security_groups=[endpoint_sg],
            subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE))
        # Create the CloudWatch Logs interface VPC endpoint
        cloudwatch_logs_endpoint = vpc.add_interface_endpoint(
            id='CloudWatchLogsEndpoint',
            service=ec2.InterfaceVpcEndpointAwsService.CLOUDWATCH_LOGS,
            private_dns_enabled=True,
            security_groups=[endpoint_sg],
            subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE))
        # Create the DynamoDB gateway VPC endpoint
        dynamodb_endpoint = vpc.add_gateway_endpoint(
            id='DynamoDbEndpoint',
            service=ec2.GatewayVpcEndpointAwsService.DYNAMODB,
            subnets=[ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE)])

        self.output_props = props.copy()
        self.output_props['vpc'] = vpc
        self.output_props['endpoint_sg'] = endpoint_sg
    def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        vpc = props['vpc']
        internal_sg = props['internal_sg']
        bastion_sg = props['bastion_sg']

        # Linux bastion host
        bastion_linux = ec2.Instance(
            self, 'BastionLinux',
            instance_type=ec2.InstanceType('t3.micro'),
            machine_image=ec2.MachineImage.latest_amazon_linux(
                generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2),
            key_name=self.node.try_get_context('key_name'),
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            security_group=internal_sg
        )
        bastion_linux.role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name('AmazonSSMManagedInstanceCore'))
        bastion_linux.add_security_group(bastion_sg)

        # Windows bastion host
        bastion_windows = ec2.Instance(
            self, 'BastionWindows',
            instance_type=ec2.InstanceType('t3.large'),
            machine_image=ec2.MachineImage.latest_windows(
                version=ec2.WindowsVersion.WINDOWS_SERVER_2016_JAPANESE_FULL_BASE),
            key_name=self.node.try_get_context('key_name'),
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            security_group=internal_sg
        )
        bastion_windows.role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name('AmazonSSMManagedInstanceCore'))
        bastion_windows.add_security_group(bastion_sg)

        # EC2 host for RADIUS
        radius_host = ec2.Instance(
            self, 'RadiusHost',
            instance_type=ec2.InstanceType('t3.small'),
            machine_image=ec2.MachineImage.latest_amazon_linux(
                generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2),
            key_name=self.node.try_get_context('key_name'),
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE),
            security_group=internal_sg
        )
        radius_host.role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name('AmazonSSMManagedInstanceCore'))

        self.output_props = props.copy()
    def __init__(self, scope: core.Construct, environment: Environment, **kwargs) -> None:
        self.env = environment.value
        super().__init__(scope, id=f'{self.env}-common', **kwargs)

        self.custom_vpc = ec2.Vpc(
            self,
            f'vpc-{self.env}'
        )

        self.orders_rds_sg = ec2.SecurityGroup(
            self,
            f'orders-{self.env}-sg',
            vpc=self.custom_vpc,
            allow_all_outbound=True,
            security_group_name=f'orders-{self.env}-sg',
        )

        self.orders_rds_sg.add_ingress_rule(
            peer=ec2.Peer.ipv4('37.156.75.55/32'),
            connection=ec2.Port.tcp(5432)
        )

        for subnet in self.custom_vpc.private_subnets:
            self.orders_rds_sg.add_ingress_rule(
                peer=ec2.Peer.ipv4(subnet.ipv4_cidr_block),
                connection=ec2.Port.tcp(5432)
            )

        self.orders_rds = rds.DatabaseInstance(
            self,
            f'orders-{self.env}-rds',
            engine=rds.DatabaseInstanceEngine.postgres(version=rds.PostgresEngineVersion.VER_11_2),
            database_name='orders',
            instance_type=ec2.InstanceType('t3.micro'),
            vpc=self.custom_vpc,
            instance_identifier=f'rds-{self.env}-orders-db',
            port=5432,
            vpc_placement=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            subnet_group=rds.SubnetGroup(
                self,
                f'rds-{self.env}-subnet',
                description='place RDS on public subnet',
                vpc=self.custom_vpc,
                vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC)
            ),
            security_groups=[
                self.orders_rds_sg
            ],
            removal_policy=core.RemovalPolicy.DESTROY,
            **kwargs
        )
Example #8
    def __init__(self, scope: core.Construct, construct_id: str, vpc: ec2.Vpc,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # The code that defines your stack goes here
        env_name = self.node.try_get_context("env")
        eks_role = iam.Role(
            self,
            "eksadmin",
            assumed_by=iam.ServicePrincipal(service='ec2.amazonaws.com'),
            role_name='eks-cluster-role',
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    managed_policy_name='AdministratorAccess')
            ])
        eks_instance_profile = iam.CfnInstanceProfile(
            self,
            'instanceprofile',
            roles=[eks_role.role_name],
            instance_profile_name='eks-cluster-role')

        cluster = eks.Cluster(
            self,
            'prod',
            cluster_name='ie-prod-snow-common',
            version=eks.KubernetesVersion.V1_19,
            vpc=vpc,
            vpc_subnets=[
                ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE)
            ],
            default_capacity=0,
            masters_role=eks_role)

        nodegroup = cluster.add_nodegroup_capacity(
            'eks-nodegroup',
            instance_types=[
                ec2.InstanceType('t3.large'),
                ec2.InstanceType('m5.large'),
                ec2.InstanceType('c5.large')
            ],
            disk_size=50,
            min_size=2,
            max_size=2,
            desired_size=2,
            subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE),
            remote_access=eks.NodegroupRemoteAccess(
                ssh_key_name='ie-prod-snow-common'),
            capacity_type=eks.CapacityType.SPOT)
Example #9
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        
        vpc = ec2.Vpc.from_lookup(self,'vpc',
            vpc_id='vpc-082a9f3f7200f4513'
        )

        k8s_admin = iam.Role(self, "k8sadmin",
            assumed_by=iam.ServicePrincipal(service='ec2.amazonaws.com'),
            role_name='eks-master-role',
            managed_policies=[iam.ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name='AdministratorAccess')]
        )
        k8s_instance_profile = iam.CfnInstanceProfile(self, 'instanceprofile',
            roles=[k8s_admin.role_name],
            instance_profile_name='eks-master-role'
        )

        cluster = eks.Cluster(self, 'dev',
            cluster_name='eks-cdk-demo',
            version='1.15',
            vpc=vpc,
            vpc_subnets=[ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE)],
            default_capacity=0,
            kubectl_enabled=True,
            #security_group=k8s_sg,
            masters_role=k8s_admin
        )
        #cluster.aws_auth.add_user_mapping(adminuser, {groups['system:masters']})
        
        ng = cluster.add_nodegroup('eks-ng',
            nodegroup_name='eks-ng',
            instance_type=ec2.InstanceType('t3.medium'),
            disk_size=5,
            min_size=1,
            max_size=1,
            desired_size=1,
            subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE),
            remote_access=eks.NodegroupRemoteAccess(ssh_key_name='k8s-nodes')
        )

        
        
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 repository_name: str,
                 directory: str,
                 subnet_group_name: str,
                 context: InfraContext,
                 securityGroups: typing.Optional[typing.List[
                     ec2.SecurityGroup]] = None,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        self.repo = assets.DockerImageAsset(self,
                                            'Repo',
                                            directory=os.path.join(
                                                src_root_dir, directory),
                                            repository_name=repository_name)

        self.function = lambda_.DockerImageFunction(
            self,
            'ContainerFunction',
            code=lambda_.DockerImageCode.from_ecr(
                repository=self.repo.repository,
                tag=self.repo.image_uri.split(':')[-1]
            ),  # lambda_.DockerImageCode.from_image_asset(directory=os.path.join(src_root_dir,directory)),
            description='Python container lambda function for ' +
            repository_name,
            timeout=core.Duration.minutes(1),
            tracing=lambda_.Tracing.ACTIVE,
            vpc=context.networking.vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_group_name=subnet_group_name),
            security_groups=securityGroups)
Example #11
    def _create_backend_store(self):
        """
        Create an RDS instance as the backend store for the MLflow server
        """
        # Creates a security group for AWS RDS
        self.sg_rds = ec2.SecurityGroup(scope=self, id="SGRDS", vpc=self.vpc, security_group_name="sg_rds")
        # Adds an ingress rule which allows resources in the VPC's CIDR to access the database.
        self.sg_rds.add_ingress_rule(peer=ec2.Peer.ipv4("10.0.0.0/24"), connection=ec2.Port.tcp(self.port))

        backend_store_id = f"{self.stack_name}-{self.component_id}-backend-store"

        self.database = rds.DatabaseInstance(
            scope=self,
            id=backend_store_id,
            database_name=self.dbname,
            port=self.port,
            credentials=rds.Credentials.from_username(
                username=self.username, password=self.db_password_secret.secret_value
            ),
            engine=rds.DatabaseInstanceEngine.mysql(version=rds.MysqlEngineVersion.VER_8_0_19),
            instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL),
            vpc=self.vpc,
            security_groups=[self.sg_rds],
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE_ISOLATED),
            # multi_az=True,
            removal_policy=RemovalPolicy.DESTROY,
            deletion_protection=False,
        )
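A note on the snippet above: it reads self.db_password_secret.secret_value without showing where that secret comes from. Below is a minimal sketch of a helper that could create it, assuming aws_cdk.aws_secretsmanager is imported as secretsmanager; the helper name, construct id, and generator options are illustrative and not part of the original example.

    def _create_db_password_secret(self) -> None:
        # Hypothetical helper: creates the secret later consumed via
        # self.db_password_secret.secret_value in _create_backend_store.
        # Assumes: from aws_cdk import aws_secretsmanager as secretsmanager
        self.db_password_secret = secretsmanager.Secret(
            self,
            "DBPasswordSecret",
            generate_secret_string=secretsmanager.SecretStringGenerator(
                exclude_punctuation=True,
                password_length=16,
            ),
        )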
Example #12
    def __init__(self, scope: core.Construct, id: str, vpc: aws_ec2.Vpc,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        _subnets = []
        _subnets.append(
            aws_ec2.Subnet(self,
                           'sbn-redshift-1',
                           availability_zone=vpc.availability_zones[0],
                           vpc_id=vpc.vpc_id,
                           cidr_block='10.0.4.0/25'))

        _subnets.append(
            aws_ec2.Subnet(self,
                           'sbn-redshift-2',
                           availability_zone=vpc.availability_zones[1],
                           vpc_id=vpc.vpc_id,
                           cidr_block='10.0.4.128/25'))

        _cluster_subnet_group = aws_redshift.ClusterSubnetGroup(
            self,
            'deta-pipeline-redshift-subnet',
            description='redshift cluster subnet',
            vpc=vpc,
            vpc_subnets=aws_ec2.SubnetSelection(subnets=_subnets))

        aws_redshift.Cluster(
            self,
            'destination-redshift',
            master_user=aws_redshift.Login(master_username='******'),
            vpc=vpc,
            subnet_group=_cluster_subnet_group)
Example #13
    def __init__(self, scope: core.Construct, id: str, props,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        vpc = props['vpc_dr']
        internal_sg = props['internal_sg_dr']
        bastion_sg = props['bastion_sg_dr']

        # Bastion EC2 host (Windows)
        bastion_windows = ec2.Instance(
            self,
            'Bastion',
            instance_type=ec2.InstanceType('t3.large'),
            machine_image=ec2.MachineImage.latest_windows(
                version=ec2.WindowsVersion.
                WINDOWS_SERVER_2016_JAPANESE_FULL_BASE),
            key_name=self.node.try_get_context('key_name'),
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            security_group=internal_sg)
        bastion_windows.role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'AmazonSSMManagedInstanceCore'))
        bastion_windows.add_security_group(bastion_sg)

        self.output_props = props.copy()
Example #14
 def build_file_system(
     scope: core.Construct,
     name: str,
     efs_life_cycle: str,
     vpc: ec2.IVpc,
     efs_security_group: ec2.ISecurityGroup,
     subnets: List[ec2.ISubnet],
     team_kms_key: kms.Key,
 ) -> efs.FileSystem:
     fs_name: str = f"orbit-{name}-fs"
     efs_fs: efs.FileSystem = efs.FileSystem(
         scope=scope,
         id=fs_name,
         file_system_name=fs_name,
         vpc=vpc,
         encrypted=True,
         lifecycle_policy=efs.LifecyclePolicy[efs_life_cycle]
         if efs_life_cycle else None,
         performance_mode=efs.PerformanceMode.GENERAL_PURPOSE,
         throughput_mode=efs.ThroughputMode.BURSTING,
         security_group=efs_security_group,
         vpc_subnets=ec2.SubnetSelection(subnets=subnets),
         kms_key=cast(Optional[kms.IKey], team_kms_key),
     )
     return efs_fs
Example #15
    def setup_emqx(self, N, vpc, zone, sg, key):
        self.emqx_vms = []
        for n in range(0, N):
            name = "emqx-%d" % n
            vm = ec2.Instance(self, id = name,
                              instance_type = ec2.InstanceType(instance_type_identifier=emqx_ins_type),
                              machine_image = linux_ami,
                              user_data = ec2.UserData.custom(user_data),
                              security_group = sg,
                              key_name = key,
                              vpc = vpc,
                              vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE),
            )

            self.emqx_vms.append(vm)

            r53.ARecord(self, id = name + '.int.emqx',
                        record_name = name + '.int.emqx',
                        zone = zone,
                        target = r53.RecordTarget([vm.instance_private_ip])
            )

            # tagging
            if self.user_defined_tags:
                core.Tags.of(vm).add(*self.user_defined_tags)
            core.Tags.of(vm).add('service', 'emqx')
Example #16
    def __init__(self, scope: core.Construct, id: str, vpc, config,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        prj_name = self.node.try_get_context("project_name")
        env_name = self.node.try_get_context("env")
        name = config['ec2']['name']
        key = config['ec2']['ssh_key']

        ubuntu_ami = ec2.GenericLinuxImage(
            {"ap-southeast-1": "ami-028be27cf930f7a43"})

        # Create bastion host
        self.bastion = ec2.Instance(
            self,
            'Instance',
            instance_type=ec2.InstanceType("t3.small"),
            instance_name=f"{name}-bastion",
            key_name=f"{key}",
            machine_image=ubuntu_ami,
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PRIVATE),
        )
        self.bastion.apply_removal_policy(core.RemovalPolicy.DESTROY)

        self.bastion.connections.allow_from_any_ipv4(
            port_range=ec2.Port.tcp(22),
            description='Allow public SSH connections')
        self.bastion.connections.allow_from_any_ipv4(
            port_range=ec2.Port.icmp_ping(),
            description='Allow public ICMP ping')

        core.CfnOutput(self,
                       f'{name}-private-ip',
                       value=self.bastion.instance_private_ip)
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        #import vpc info
        vpc = aws_ec2.Vpc.from_lookup(self, "vpc", vpc_id="vpc-1f39b977")

        #import user-data scripts
        with open("userdata_scripts/setup.sh", mode="r") as file:
            user_data = file.read()

        #get latest ami from any region
        aws_linux_ami = aws_ec2.MachineImage.latest_amazon_linux(
            generation=aws_ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=aws_ec2.AmazonLinuxEdition.STANDARD,
            storage=aws_ec2.AmazonLinuxStorage.EBS,
            virtualization=aws_ec2.AmazonLinuxVirt.HVM)

        #ec2
        test_server = aws_ec2.Instance(
            self,
            "ec2id",
            instance_type=aws_ec2.InstanceType(
                instance_type_identifier="t2.micro"),
            instance_name="TestServer01",
            machine_image=aws_linux_ami,
            vpc=vpc,
            vpc_subnets=aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.PUBLIC),
            key_name="SAA-C01",
            user_data=aws_ec2.UserData.custom(user_data))

        #add custom ebs for ec2
        test_server.instance.add_property_override(
            "BlockDeviceMappings", [{
                "DeviceName": "/dev/sdb",
                "Ebs": {
                    "VolumeSize": "10",
                    "VolumeType": "io1",
                    "Iops": "100",
                    "DeleteOnTermination": "true"
                }
            }])

        #allow web traffic
        test_server.connections.allow_from_any_ipv4(
            aws_ec2.Port.tcp(80), description="allow web traffic")

        # add permission to instances profile
        test_server.role.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonSSMManagedInstanceCore"))
        test_server.role.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonS3ReadOnlyAccess"))

        output_server_ip = core.CfnOutput(self,
                                          "serverip01",
                                          description="test server ip",
                                          value=test_server.instance_public_ip)
Example #18
    def __init__(self, app: core.App, id: str, **kwargs) -> None:
        super().__init__(app, id, **kwargs)

        vpc = ec2.Vpc(self, "VPC")

        # Security group for our test instance
        my_sg = ec2.SecurityGroup(self,
                                  "my_sg",
                                  vpc=vpc,
                                  description="My sg for testing",
                                  allow_all_outbound=True)
        # Add ssh from anywhere
        my_sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(22),
                               "Allow ssh access from anywhere")

        asg = autoscaling.AutoScalingGroup(
            self,
            "ASG",
            vpc=vpc,
            instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2,
                                              ec2.InstanceSize.MICRO),
            machine_image=ec2.AmazonLinuxImage(),
        )
        asg.add_security_group(my_sg)  # add our security group, expects object

        ## Classic Elastic Load Balancer
        #lb = elb.LoadBalancer(
        #    self, "ELB",
        #    vpc=vpc,
        #    internet_facing=True,
        #    health_check={"port": 22}
        #)
        #lb.add_target(asg)
        #
        #listener = lb.add_listener(
        #    external_port=8000,
        #    external_protocol=elb.LoadBalancingProtocol.TCP,
        #    internal_port=22,
        #    internal_protocol=elb.LoadBalancingProtocol.TCP
        #)
        #listener.connections.allow_default_port_from_any_ipv4("Open to the world")

        # Network Load Balancer
        nlb = elbv2.NetworkLoadBalancer(
            self,
            "NLB",
            vpc=vpc,
            internet_facing=True,
            cross_zone_enabled=True,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC))

        my_target = elbv2.NetworkTargetGroup(self,
                                             "MyTargetGroup",
                                             port=22,
                                             vpc=vpc)

        listener = nlb.add_listener("Listener",
                                    port=8000,
                                    default_target_groups=[my_target])
        my_target.add_target(asg)
    def create_asg_by_service(self, name, userdata_path, key_name, port):
        role = self.output_props["default_role"]
        sg = self.output_props["sg"]
        nlb = self.output_props["nlb"]
        userdata = ec2.UserData.for_linux()
        with open(userdata_path, "rb") as f:
            userdata.add_commands(str(f.read(), 'utf-8'))

        asg = autoscaling.AutoScalingGroup(
            self,
            name,
            vpc=self.output_props['vpc'],
            instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE3,
                                              ec2.InstanceSize.MICRO),
            machine_image=ec2.AmazonLinuxImage(
                generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2),
            key_name=key_name,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PRIVATE),
            user_data=userdata,
            security_group=sg,
            spot_price="0.005",
            min_capacity=1,
            max_capacity=3,
            desired_capacity=1,
            role=role,
        )

        nlb.add_listener(name, port=port, protocol=lb.Protocol.TCP) \
            .add_targets(name, port=port, targets=[asg])
Example #20
    def create_asg(self, vpc):
        asg = autoscaling.AutoScalingGroup(
            self,
            'SingleInstanceAsg',
            vpc=vpc,
            machine_image=ecs.EcsOptimizedAmi(),
            instance_type=ec2.InstanceType('t2.micro'),
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            associate_public_ip_address=True,
            # We only need 1 instance in the ASG
            max_capacity=1,
            desired_capacity=1,
        )

        security_group = ec2.SecurityGroup(
            self,
            'GhostSg',
            vpc=vpc,
        )

        # Allow ingress traffic to port 80
        security_group.add_ingress_rule(
            peer=ec2.Peer.any_ipv4(),
            connection=ec2.Port.tcp(80),
        )

        # Allow NFS traffic for mounting EFS volumes
        security_group.add_ingress_rule(peer=ec2.Peer.ipv4('10.0.0.0/16'),
                                        connection=ec2.Port.tcp(2049))

        asg.add_security_group(security_group)
        return asg
Example #21
    def __init__(self, scope: core.Construct, id: str, vpc, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        prj_name = self.node.try_get_context("project_name")
        env_name = self.node.try_get_context("env")

        rds_sg = ec2.SecurityGroup(self,
                                   'rds-sg',
                                   vpc=vpc,
                                   security_group_name=prj_name + env_name +
                                   '-rds-sg',
                                   description="SG for RDS",
                                   allow_all_outbound=True)

        for subnet in vpc.private_subnets:
            rds_sg.add_ingress_rule(
                peer=ec2.Peer.ipv4(subnet.ipv4_cidr_block),
                connection=ec2.Port.tcp(3306),
                description='Allow all private subnet to access RDS')

        db_mysql = rds.DatabaseCluster(
            self,
            'mysql',
            default_database_name=prj_name + env_name,
            engine=rds.DatabaseClusterEngine.aurora_mysql(
                version=rds.AuroraMysqlEngineVersion.VER_5_7_12),
            instances=1,
            instance_props=rds.InstanceProps(
                vpc=vpc,
                instance_type=ec2.InstanceType(
                    instance_type_identifier="t3.small"),
                vpc_subnets=ec2.SubnetSelection(
                    subnet_type=ec2.SubnetType.PRIVATE)),
            removal_policy=core.RemovalPolicy.DESTROY)
    def _create_network(self):
        """
        Create the VPC, private subnets, and security group for RDS
        """

        public_subnet_config = ec2.SubnetConfiguration(
            cidr_mask=24,
            name=f"{self.name_prefix}-subnet-public",
            subnet_type=ec2.SubnetType.PUBLIC)
        private_subnet_config = ec2.SubnetConfiguration(
            cidr_mask=24,
            name=f"{self.name_prefix}-subnet-private",
            subnet_type=ec2.SubnetType.PRIVATE_WITH_NAT)
        self.vpc = ec2.Vpc(
            self,
            "VPC",
            cidr=self.context['vpc']['cidr'],
            subnet_configuration=[public_subnet_config, private_subnet_config],
            nat_gateways=1,
            max_azs=2)
        self.security_group = ec2.SecurityGroup(
            self,
            "SecurityGroupForRDS",
            vpc=self.vpc,
            security_group_name=f"{self.name_prefix}-security-group-name")

        self.security_group.add_ingress_rule(
            self.security_group, ec2.Port.tcp(3306),
            "allow access from my security group")

        self.subnet_selection = ec2.SubnetSelection(
            one_per_az=False,
            subnets=self.vpc.private_subnets,
        )
Example #23
    def __init__(self, scope: core.Construct, id: str, vpc, sg, redissg,
                 kmskey, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        rdskey = kms.Key.from_key_arn(self, "rdskey", key_arn=kmskey)

        db_mysql = rds.DatabaseCluster(
            self,
            "Dev_MySQL",
            default_database_name="msadev",
            engine=rds.DatabaseClusterEngine.AURORA_MYSQL,
            engine_version="5.7.12",
            master_user=rds.Login(username="******"),
            instance_props=rds.InstanceProps(
                vpc=vpc,
                vpc_subnets=ec2.SubnetSelection(
                    subnet_type=ec2.SubnetType.ISOLATED),
                instance_type=ec2.InstanceType(
                    instance_type_identifier="t3.medium")),
            instances=1,
            parameter_group=rds.ClusterParameterGroup.
            from_parameter_group_name(
                self,
                "paramter-group-msadev",
                parameter_group_name="default.aurora-mysql5.7"),
            kms_key=rdskey)
        sgId = ec2.SecurityGroup.from_security_group_id(self, "sgid", sg)
        redis_sg = ec2.SecurityGroup.from_security_group_id(
            self, "redissgid", redissg)

        db_mysql.connections.allow_default_port_from(sgId,
                                                     "Access from Bastion")
        db_mysql.connections.allow_default_port_from(redis_sg,
                                                     "Access from Redis")
Example #24
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        vpc = ec2.Vpc(self,
                      'vpc1'
                      )

        bucket_name = 'my-cdk-bucket'
        s3.Bucket(self,
                  bucket_name,
                  bucket_name=bucket_name,
                  access_control=s3.BucketAccessControl.PUBLIC_READ_WRITE,
                  removal_policy=RemovalPolicy.DESTROY)

        ec2.Volume(self, 'vol1', availability_zone='us-east-1a', size=core.Size.gibibytes(8))

        sg = ec2.SecurityGroup(self,
                               'sg1',
                               vpc=vpc)
        sg.add_ingress_rule(Peer.any_ipv4(), Port.tcp(22))

        kms.Key(self, 'kms1')

        rds.DatabaseInstance(self,
                             'rds1',
                             engine=rds.DatabaseInstanceEngine.postgres(version=PostgresEngineVersion.VER_12),
                             master_username='******',
                             vpc=vpc,
                             vpc_placement=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC))
Example #25
    def __init__(self, scope: core.Construct, construct_id: str, vpc, ec2_capacity=False, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        self.cluster = ecs.Cluster(self, "CdkCluster",
                                   cluster_name="cdk-ecs",
                                   vpc=vpc)

        if ec2_capacity:
            key_name = os.getenv("EC2_KEY_PAIR_NAME")
            asg = self.cluster.add_capacity("CdkClusterCapacity",
                                            instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE3_AMD,
                                                                              ec2.InstanceSize.SMALL),
                                            key_name=key_name,
                                            cooldown=cdk_core.Duration.seconds(30),
                                            min_capacity=1,
                                            max_capacity=5)

            # note: CDK doesn't support ECS capacity providers yet
            asg.scale_to_track_metric("CpuReservationScalingPolicy",
                                      metric=self.cluster.metric_cpu_reservation(),
                                      target_value=50)
            asg.scale_to_track_metric("MemoryReservationScalingPolicy",
                                      metric=self.cluster.metric_memory_reservation(),
                                      target_value=50)

            bastion = ec2.BastionHostLinux(self, "CdkBastion",
                                           vpc=vpc,
                                           subnet_selection=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC))
            bastion.allow_ssh_access_from(ec2.Peer.any_ipv4())
            bastion.instance.instance.key_name = key_name

            asg.connections.allow_from(bastion, ec2.Port.tcp(22))
Example #26
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 vpc: ec2.Vpc,
                 sg: ec2.ISecurityGroup,
                 stage={},
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        prefix_name = f'{stage["vpc_prefix"]}-{stage["stage_name"]}-{self.node.try_get_context("customer")}'

        bastion_host = ec2.Instance(
            self,
            f'{prefix_name}-public-bastion',
            instance_type=ec2.InstanceType('t3.micro'),
            machine_image=ec2.AmazonLinuxImage(
                edition=ec2.AmazonLinuxEdition.STANDARD,
                generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
                virtualization=ec2.AmazonLinuxVirt.HVM,
                storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE),
            vpc=vpc,
            key_name=stage["key_name"],
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            security_group=sg)

        core.Tags.of(bastion_host).add("Name", f'{prefix_name}-public-bastion')

        core.CfnOutput(self, 'my-bastion-id', value=bastion_host.instance_id)
Example #27
    def __init__(self, scope: core.Construct, id: str, vpc, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        amzn_linux = ec2.MachineImage.latest_amazon_linux(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX,
            edition=ec2.AmazonLinuxEdition.STANDARD,
            virtualization=ec2.AmazonLinuxVirt.HVM,
            storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE)
        linux = ec2.MachineImage.generic_linux(
            {"ap-south-1": "ami-0b9d66ddb2a9f47d1"})

        data = open("./scripts/userdata/userdata.sh", "rb").read()
        user_data = ec2.UserData.for_linux()
        user_data.add_commands(str(data, 'utf-8'))

        self.auto_scaling_group = autoscaling.AutoScalingGroup(
            self,
            "FirstASG",
            instance_type=ec2.InstanceType('t2.micro'),
            machine_image=ec2.AmazonLinuxImage(
                generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2),
            vpc=vpc,
            user_data=user_data,
            desired_capacity=6,
            key_name="ap-south-1",
            min_capacity=1,
            vpc_subnets=ec2.SubnetSelection(
                availability_zones=["ap-south-1a", "ap-south-1b"]))
Example #28
    def __init__(self, scope: core.Construct, id: str, vpc: ec2.IVpc, config: dict, region: str, **kwargs):
        super().__init__(scope, id, **kwargs)

        self._region = region

        ### EC2 Server for Jenkins
        image = ec2.GenericLinuxImage(
            {
                region: config["ami_id"],
            },
        )

        self._role = iam.Role(self, "InstanceRole", assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"))
        for policy in config["iam_role_policies"]:
            self._role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name(policy))

        subnet = vpc.select_subnets(subnet_type=ec2.SubnetType.PRIVATE).subnets[0]
        subnet_selection = ec2.SubnetSelection(subnets=[subnet])

        self.security_group = ec2.SecurityGroup(
            self, "EC2SG",
            vpc=vpc
        )

        self._instance = ec2.Instance(
            self, "EC2",
            instance_type=ec2.InstanceType(config["instance_type"]),
            machine_image=image,
            vpc=vpc,
            vpc_subnets=subnet_selection,
            role=self._role,
            security_group=self.security_group
        )

        core.CfnOutput(self, "CodeServerInstanceID", value=self._instance.instance_id)
Example #29
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        vpc = _ec2.Vpc.from_lookup(self, "importedVPC", vpc_id="vpc-d0a193aa")

        # Read BootStrap Script
        with open("bootstrap_scripts/install_httpd.sh", mode="r") as file:
            user_data = file.read()

        # WebServer Instance 001
        web_server = _ec2.Instance(
            self,
            "WebServer001Id",
            instance_type=_ec2.InstanceType(
                instance_type_identifier="t2.micro"),
            instance_name="WebServer001",
            machine_image=_ec2.MachineImage.generic_linux(
                {"us-east-1": "ami-0fc61db8544a617ed"}),
            vpc=vpc,
            vpc_subnets=_ec2.SubnetSelection(
                subnet_type=_ec2.SubnetType.PUBLIC),
            user_data=_ec2.UserData.custom(user_data))

        output_1 = core.CfnOutput(
            self,
            "webServer001Ip",
            description="WebServer Public Ip Address",
            value=f"http://{web_server.instance_public_ip}")

        # Allow Web Traffic to WebServer
        web_server.connections.allow_from_any_ipv4(
            _ec2.Port.tcp(80), description="Allow Web Traffic")
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # vpc
        self.vpc = ec2.Vpc(
            self,
            "Vpc",
            max_azs=2,
            subnet_configuration=[
                ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PUBLIC,
                                        name="Public",
                                        cidr_mask=24),
                ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PRIVATE,
                                        name="Private",
                                        cidr_mask=24)
            ],
            nat_gateway_provider=ec2.NatProvider.gateway(),
            nat_gateways=2,
        )
        core.CfnOutput(self, "VpcID", value=self.vpc.vpc_id)

        self.load_balancer = elbv2.ApplicationLoadBalancer(
            self,
            "ExternalEndpoint",
            vpc=self.vpc,
            internet_facing=True,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
        )

        core.CfnOutput(self,
                       "ExternalDNSName",
                       value=self.load_balancer.load_balancer_dns_name)
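
All of the snippets above are bodies of core.Stack subclasses (or helpers called from them). For context, a minimal CDK v1 app entry point that instantiates such a stack might look like the sketch below; the stack class name, module path, account, and region are placeholders rather than values taken from any example above. Stacks that call ec2.Vpc.from_lookup in particular need an explicit env so the VPC can be resolved at synth time.

#!/usr/bin/env python3
# Minimal sketch of a CDK v1 app entry point (app.py).
# NetworkStack and stacks.network are hypothetical names; the account and
# region values are placeholders.
from aws_cdk import core

from stacks.network import NetworkStack

app = core.App()
NetworkStack(
    app,
    "network-stack",
    env=core.Environment(account="111111111111", region="us-east-1"),
)
app.synth()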