Example #1
    def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # elastic policy
        elastic_policy = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                "es:*",
            ],
            resources=["*"],
        )
        elastic_policy.add_any_principal()

        self.elastic_domain = aes.Domain(
            self,
            "elastic_domain",
            version=aes.ElasticsearchVersion.V7_9,
            capacity=aes.CapacityConfig(data_node_instance_type="t3.small.elasticsearch", data_nodes=1),
            ebs=aes.EbsOptions(enabled=True, volume_size=10),
            access_policies=[elastic_policy],
            fine_grained_access_control=aes.AdvancedSecurityOptions(
                master_user_name=config.get_es_credentials()[0],
                master_user_password=core.SecretValue(config.get_es_credentials()[1]),
            ),
            zone_awareness=aes.ZoneAwarenessConfig(enabled=False),
            node_to_node_encryption=True,
            encryption_at_rest=aes.EncryptionAtRestOptions(enabled=True),
            enforce_https=True,
            removal_policy=core.RemovalPolicy.DESTROY,
        )

        core.Tags.of(self.elastic_domain).add("system-id", config.get_system_id())

        core.CfnOutput(self, "ESDomainEndpoint", value=self.elastic_domain.domain_endpoint)
Example #2
    def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc,
                 security_group: ec2.SecurityGroup, config: dict,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        vdb_rds_params = rds.ParameterGroup(
            self,
            id='rds-pg-vdb-cdk',
            family='postgres11',
            parameters={
                'autovacuum': '1',
                'autovacuum_work_mem': '-1',
                'autovacuum_max_workers': '3',
                'huge_pages': 'on',
                'log_min_duration_statement': '1000',
                'track_counts': '1',
                'maintenance_work_mem': '524288',
                'shared_buffers': '262144',
                'seq_page_cost': '1',
                'random_page_cost': '2',
                'min_wal_size': '512',
                'max_wal_size': '4096',
                'wal_compression': '1',
                'work_mem': '262144',
                'temp_file_limit': '10485760',
                'effective_cache_size': '786432'
            })

        self.vdb_rds = rds.DatabaseInstance(
            self,
            id='VdbCdk',
            database_name=config['DATABASE_NAME'],
            instance_identifier='vdb-prod-cdk',
            master_username=config['DATABASE_USER'],
            master_user_password=core.SecretValue(
                value=config['DATABASE_PASSWORD']),
            port=5432,
            engine=rds.DatabaseInstanceEngine.POSTGRES,
            engine_version='11.6',
            instance_class=ec2.InstanceType('t3.large'),
            allocated_storage=100,
            storage_encrypted=False,
            multi_az=False,
            storage_type=rds.StorageType.GP2,
            allow_major_version_upgrade=False,
            auto_minor_version_upgrade=False,
            preferred_maintenance_window='sun:02:00-sun:04:00',
            copy_tags_to_snapshot=True,
            backup_retention=core.Duration.days(7),
            preferred_backup_window='04:00-06:00',
            parameter_group=vdb_rds_params,
            vpc=vpc,
            security_groups=[security_group])
Example #3
    def console_password(self,
                         construct_id: str,
                         secret_name: str,
                         template: str = None,
                         key: str = None):
        # Generate a random console password in Secrets Manager and
        # return it wrapped as a SecretValue.
        self.secret = asm.Secret(
            self,
            construct_id,
            generate_secret_string=asm.SecretStringGenerator(
                secret_string_template=template,
                generate_string_key=key,
                password_length=24,
                exclude_characters='"@/\\$'),
            secret_name='{}{}'.format(secret_name, construct_id))
        return core.SecretValue(self.secret.secret_value.to_string())
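A minimal usage sketch of the helper above (assumed, not from the original source; the construct id, secret name prefix, and the iam alias are illustrative):

    # Hypothetical call site inside the same stack class.
    password = self.console_password('User1Secret', secret_name='user1-console-')
    user = iam.User(self, 'User1', user_name='user1', password=password)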
Example #4
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        try:
            with open('../.secrets/github_token.txt') as f:
                github_token = f.read().strip()
        except FileNotFoundError:
            print(
                "Create ../.secrets/github_token.txt and put the token which you create in the GitHub interface into it."
            )
            raise

        source_output = aws_codepipeline.Artifact(artifact_name='source')

        ecr, cb_docker_build = self._get_build_project()

        pipeline = aws_codepipeline.Pipeline(
            self,
            "Pipeline",
            pipeline_name="cdk-pipeline",
            stages=[
                aws_codepipeline.StageProps(
                    stage_name='Source',
                    actions=[
                        aws_codepipeline_actions.GitHubSourceAction(
                            output=source_output,
                            action_name="Source",
                            oauth_token=core.SecretValue(github_token),
                            owner='arron1993',
                            repo="arronmoore.com",
                            branch="develop")
                    ]),
                aws_codepipeline.StageProps(
                    stage_name='Build',
                    actions=[
                        aws_codepipeline_actions.CodeBuildAction(
                            action_name='DockerBuildImages',
                            input=source_output,
                            project=cb_docker_build,
                            run_order=1,
                        )
                    ])
            ])
Example #5
 def __init__(self, scope):
     super().__init__(scope, "bug")
     bucket = Bucket.from_bucket_name(
         self, "artifacts", core.Fn.import_value("CodeArtifactsBucket")
     )
     pipeline_role = Role.from_role_arn(
         self, "pipeline", core.Fn.import_value("CodePipelineRole")
     )
     pipeline = Pipeline(
         self,
         "Pipeline",
         artifact_bucket=bucket,
         role=pipeline_role,
         stages=[
             StageProps(
                 stage_name="Source",
                 actions=[
                     GitHubSourceAction(
                         action_name="Source",
                         run_order=1,
                         oauth_token=core.SecretValue("something"),
                         output=Artifact(artifact_name="SourceArtifact"),
                         owner="me",
                         repo="repo",
                         branch="master",
                     )
                 ],
             )
         ],
     )
     pipeline.add_stage(
         stage_name="Fails",
         actions=[
             LambdaInvokeAction(
                 action_name="LambdaInvokeAction",
                 run_order=1,
                 lambda_=Function.from_function_arn(
                     self, "function", core.Fn.import_value("SomeFunction")
                 ),
             )
         ],
     )
Example #6
    def __init__(self, scope: core.Construct, id: str,
                 pipeline: codepipeline.Pipeline, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        self.sourceOutput = codepipeline.Artifact()

        branch = id.split('-')[-1]
        if branch == 'develop':
            trigger = codepipeline_actions.GitHubTrigger.WEBHOOK
        else:
            trigger = codepipeline_actions.GitHubTrigger.POLL

        sourceAction = codepipeline_actions.GitHubSourceAction(
            action_name='GitHubSource-' + branch,
            owner=os.environ['GITHUB_OWNER'],
            repo=os.environ['GITHUB_REPO'],
            output=self.sourceOutput,
            branch=branch,
            oauth_token=core.SecretValue(os.environ['GITHUB_TOKEN']),
            trigger=trigger,
            run_order=1)

        pipeline.add_stage(stage_name='Source-' + branch,
                           actions=[sourceAction])
Example #7
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 domain_name: str,
                 identity_provider_client_id: str,
                 identity_provider_client_secret: str,
                 identity_provider_client_url: str,
                 identity_provider_realm: str,
                 identity_provider_scope: str = 'openid',
                 vpc: ec2.IVpc = None,
                 cluster: ecs.ICluster = None,
                 load_balancer: elbv2.IApplicationLoadBalancer = None,
                 log_group: logs.ILogGroup = None,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here

        if vpc is None:
            vpc = ec2.Vpc(self, 'ApplicationVpc')

        if cluster is None:
            cluster = ecs.Cluster(self, 'ApplicationCluster', vpc=vpc)

        if log_group is None:
            log_group = logs.LogGroup(
                self,
                'ApplicationLogGroup',
                retention=logs.RetentionDays.ONE_WEEK,
                removal_policy=core.RemovalPolicy.DESTROY)

        application_task_role = iam.Role(
            self,
            'ApplicationTaskRole',
            assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'))

        application_hosted_zone = route53.HostedZone.from_lookup(
            self, 'ApplicationHostedZone', domain_name=domain_name)

        application_certificate = acm.DnsValidatedCertificate(
            self,
            'FrontendAlbCertificate',
            hosted_zone=application_hosted_zone,
            domain_name='app.' + domain_name)

        application_service = ecs_patterns.ApplicationLoadBalancedFargateService(
            self,
            'ApplicationLoadBalancedFargateService',
            cluster=cluster,
            load_balancer=load_balancer,
            task_image_options=ecs_patterns.
            ApplicationLoadBalancedTaskImageOptions(
                image=ecs.ContainerImage.from_asset("application"),
                enable_logging=True,
                log_driver=ecs.AwsLogDriver(stream_prefix='application',
                                            log_group=log_group),
                task_role=application_task_role,
                container_port=8080,
            ),
            memory_limit_mib=512,
            cpu=256,
            desired_count=1,
            public_load_balancer=True,
            domain_name='app.' + domain_name,
            domain_zone=application_hosted_zone,
            protocol=elbv2.ApplicationProtocol.HTTPS,
        )

        application_service.target_group.enable_cookie_stickiness(
            core.Duration.seconds(24 * 60 * 60))
        application_service.target_group.configure_health_check(
            port='8080',
            path='/',
            timeout=core.Duration.seconds(20),
            healthy_threshold_count=2,
            unhealthy_threshold_count=10,
            interval=core.Duration.seconds(30),
        )

        application_service.listener.add_certificates(
            'ApplicationServiceCertificate',
            certificates=[application_certificate])

        oidc_base_url = (identity_provider_client_url + '/auth/realms/' +
                         identity_provider_realm)
        application_service.listener.add_action(
            'DefaultAction',
            action=elbv2.ListenerAction.authenticate_oidc(
                authorization_endpoint=oidc_base_url +
                '/protocol/openid-connect/auth',
                token_endpoint=oidc_base_url + '/protocol/openid-connect/token',
                user_info_endpoint=oidc_base_url +
                '/protocol/openid-connect/userinfo',
                issuer=oidc_base_url,
                client_id=identity_provider_client_id,
                client_secret=core.SecretValue(identity_provider_client_secret),
                scope=identity_provider_scope,
                on_unauthenticated_request=elbv2.UnauthenticatedAction.AUTHENTICATE,
                next=elbv2.ListenerAction.forward(
                    [application_service.target_group]),
            ))

        application_service.load_balancer.connections.allow_to_any_ipv4(
            port_range=ec2.Port(
                from_port=443,
                to_port=443,
                protocol=ec2.Protocol.TCP,
                string_representation='Allow ALB to verify token'))
Example #8
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        ZachDBInstanceClass = ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.MICRO)
        # TODO: NEEDS MULTI-SUBNET VPC
        ZachDBInstanceBelongVpc = ec2.Vpc.from_lookup(self, id="ZachDBInstanceBelongVpc", vpc_id="vpc-01e73b4b5c6f9f98a")
        ZachDBInstanceSubnet = ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC)
        ZachDBInstanceRole = iam.Role.from_role_arn(self, id="ZachRDSRole", role_arn="arn:aws:iam::098380756085:role/Zach_RDSFullPrivilege")
        mode = "HA"

        ZachDBInstance= rds.DatabaseInstance(self,id="ZachDB-A",database_name="ZachDB",
                                             instance_identifier="ZachDB",
                                             master_username="******",
                                             master_user_password=core.SecretValue(value="tsjr123!"),
                                             instance_class=ZachDBInstanceClass,
                                             engine=rds.DatabaseInstanceEngine.MYSQL,
                                             engine_version="5.7.22",
                                             auto_minor_version_upgrade=True,
                                             storage_type=rds.StorageType.STANDARD,
                                             allocated_storage=20,
                                             vpc=ZachDBInstanceBelongVpc,
                                             vpc_placement=ZachDBInstanceSubnet,
                                             port=33306,
                                             preferred_backup_window="17:00-22:00",  # UTC time
                                             backup_retention=core.Duration.days(35),
                                             deletion_protection=True,  # TODO IF YOU WANT TO DELETE DB , YOU NEED TO DISABLE THIS FIRST
                                             cloudwatch_logs_retention=aws_logs.RetentionDays.ONE_MONTH,
                                             cloudwatch_logs_retention_role=ZachDBInstanceRole,
                                             multi_az=False)

        ZachDBInstance.metric_database_connections(color="blue", label="ZachDB-A", period=core.Duration.minutes(2))
        ZachDB_CPU_Metric = ZachDBInstance.metric_cpu_utilization(color="red", label="CPU Util")
        ZachDB_RAM_Metric = ZachDBInstance.metric_freeable_memory(color="green", label="Memory Free")
        ZachDB_Disk_Metric = ZachDBInstance.metric_free_storage_space(color="purple", label="DiskSpace Free")
        # cw.Metric(metric_name="ZachDB-A",namespace="ZachDB-A")

        core.CfnOutput(self,id="ZachDB-A-ARN",value=ZachDBInstance.instance_arn)
        core.CfnOutput(self,id="ZachDB-A-VPC",value=ZachDBInstance.vpc.vpc_id)
        core.CfnOutput(self,id="ZachDB-A-ADDRESS",value=ZachDBInstance.db_instance_endpoint_address)
        core.CfnOutput(self,id="ZachDB-A-PORT",value=ZachDBInstance.db_instance_endpoint_port)

        if mode == "HA":
            ZachDBInstanceRead= rds.DatabaseInstanceReadReplica(self,id="ZachDB-A-read",database_name="ZachDB-A-read",
                                                 source_database_instance=ZachDBInstance,
                                                 master_user_password=core.SecretValue(value="tsjr123!"),
                                                 instance_class=ZachDBInstanceClass,
                                                 engine=rds.DatabaseInstanceEngine.MYSQL,
                                                 engine_version="5.7.22",
                                                 auto_minor_version_upgrade=True,
                                                 storage_type=rds.StorageType.STANDARD,
                                                 allocated_storage=20,
                                                 vpc=ZachDBInstanceBelongVpc,
                                                 vpc_placement=ZachDBInstanceSubnet,
                                                 port=33306,
                                                 preferred_backup_window="17:00-22:00",  # UTC time
                                                 backup_retention=core.Duration.days(35),
                                                 deletion_protection=True,
                                                 cloudwatch_logs_retention=aws_logs.RetentionDays.ONE_MONTH,
                                                 cloudwatch_logs_retention_role=ZachDBInstanceRole,
                                                 multi_az=False)

            core.CfnOutput(self,id="ZachDB-A-read-ARN",value=ZachDBInstanceRead.instance_arn)
            core.CfnOutput(self,id="ZachDB-A-read-VPC",value=ZachDBInstanceRead.vpc.vpc_id)
            core.CfnOutput(self,id="ZachDB-A-read-ADDRESS",value=ZachDBInstanceRead.db_instance_endpoint_address)
            core.CfnOutput(self,id="ZachDB-A-read-PORT",value=ZachDBInstanceRead.db_instance_endpoint_port)
Example #9
    def to_string(self):
        # Expose the stored Secrets Manager secret as a SecretValue token.
        return core.SecretValue(self.secret.secret_value.to_string())
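Several examples in this list wrap a plaintext string in core.SecretValue. A minimal alternative sketch, assuming the secret already exists in AWS Secrets Manager (the secret name and JSON field here are illustrative): core.SecretValue.secrets_manager resolves the value at deploy time instead of embedding it in the source.

    # Hypothetical secret name and field; both are assumptions.
    oauth_token = core.SecretValue.secrets_manager(
        'github-token',            # Secrets Manager secret name or ARN
        json_field='oauth-token',  # optional key inside a JSON secret string
    )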
Example #10
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here

        #vpc = ec2.Vpc.from_lookup(self, 'VPC', is_default=True)

        vpc = ec2.Vpc(self, "MyVpc", max_azs=2)

        rdsInst = rds.DatabaseInstance(
            self,
            'SpringPetclinicDB',
            engine=rds.DatabaseInstanceEngine.MYSQL,
            instance_class=ec2.InstanceType('t2.medium'),
            master_username='******',
            database_name='petclinic',
            master_user_password=core.SecretValue('Welcome#123456'),
            vpc=vpc,
            deletion_protection=False,
            backup_retention=core.Duration.days(0),
            removal_policy=core.RemovalPolicy.DESTROY,
            #vpc_placement = ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC)
        )

        rdsInst.connections.allow_default_port_from_any_ipv4()

        cluster = ecs.Cluster(self, 'EcsCluster', vpc=vpc)

        asset = ecr_assets.DockerImageAsset(
            self,
            'spring-petclinic',
            directory='./docker/',
            build_args={
                'JAR_FILE': 'spring-petclinic-2.1.0.BUILD-SNAPSHOT.jar'
            })

        cluster.add_capacity(
            "DefaultAutoScalingGroup",
            instance_type=ec2.InstanceType('t2.large'),
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            min_capacity=2)

        ecs_service = ecs_patterns.ApplicationLoadBalancedEc2Service(
            self,
            "Ec2Service",
            cluster=cluster,
            memory_limit_mib=1024,
            service_name='spring-petclinic',
            desired_count=2,
            task_image_options={
                "image": ecs.ContainerImage.from_docker_image_asset(asset),
                "container_name": 'spring-petclinic',
                "container_port": 8080,
                "environment": {
                    'SPRING_DATASOURCE_PASSWORD': '******',
                    'SPRING_DATASOURCE_USERNAME': '******',
                    'SPRING_PROFILES_ACTIVE': 'mysql',
                    'SPRING_DATASOURCE_URL':
                        'jdbc:mysql://' + rdsInst.db_instance_endpoint_address +
                        '/petclinic?useUnicode=true'
                }
            })
Example #11
    def __init__(self, scope: core.Construct, id: str, datalake: DataLakeLayer,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        self.datalake = datalake
        self.security_group = ec2.SecurityGroup(
            self,
            'SecurityGroup',
            vpc=self.datalake.vpc,
            allow_all_outbound=True,
            description='SonarQube Security Group')

        self.security_group.add_ingress_rule(peer=ec2.Peer.any_ipv4(),
                                             connection=ec2.Port.all_traffic(),
                                             description='Allow any traffic')

        self.sonarqube_svr_ecr = ecr.DockerImageAsset(
            self,
            'Repo',
            directory=os.path.join(root_dir, 'images/sonarqube-server'),
            repository_name='sonarqube')

        self.sonarqube_cli_ecr = ecr.DockerImageAsset(
            self,
            'Cli',
            directory=os.path.join(root_dir, 'images/sonarqube-scanner'),
            repository_name='sonarqube-cli')

        self.database = rds.DatabaseCluster(
            self,
            'Database',
            engine=rds.DatabaseClusterEngine.aurora_postgres(
                version=rds.AuroraPostgresEngineVersion.VER_11_9),
            default_database_name='sonarqube',
            removal_policy=core.RemovalPolicy.DESTROY,
            credentials=rds.Credentials.from_username(
                username='******',
                password=core.SecretValue(value='postgres')),
            instance_props=rds.InstanceProps(
                vpc=self.datalake.vpc,
                security_groups=[self.security_group],
                instance_type=ec2.InstanceType('r6g.xlarge')))

        # self.ecs_cluster = ecs.Cluster(self,'SonarCluster',
        #   container_insights=True,
        #   vpc=self.datalake.vpc,
        #   capacity=ecs.AddCapacityOptions(
        #     machine_image_type= ecs.MachineImageType.AMAZON_LINUX_2,
        #     instance_type=ec2.InstanceType('m5.xlarge'),
        #     allow_all_outbound=True,
        #     associate_public_ip_address=False,
        #     vpc_subnets= ec2.SubnetSelection(subnet_type= ec2.SubnetType.PUBLIC),
        #     desired_capacity=2))

        # self.service = ecsp.ApplicationLoadBalancedEc2Service(self,'SonarEc2',
        #   cluster=self.ecs_cluster,
        #   desired_count=1,
        #   listener_port=80,
        #   memory_reservation_mib= 4 * 1024,
        #   task_image_options= ecsp.ApplicationLoadBalancedTaskImageOptions(
        #     image= ecs.ContainerImage.from_docker_image_asset(asset=self.sonarqube_svr_ecr),
        #     container_name='sonarqube-svr',
        #     container_port=9000,
        #     enable_logging=True,
        #     environment={
        #       '_SONAR_JDBC_URL':'jdbc:postgresql://{}/sonarqube'.format(
        #           self.database.cluster_endpoint.hostname),
        #       '_SONAR_JDBC_USERNAME':'******',
        #       '_SONAR_JDBC_PASSWORD':'******'
        #     }))

        self.service = ecsp.ApplicationLoadBalancedFargateService(
            self,
            'Server',
            assign_public_ip=True,
            vpc=self.datalake.vpc,
            desired_count=1,
            cpu=4096,
            memory_limit_mib=8 * 1024,
            listener_port=80,
            platform_version=ecs.FargatePlatformVersion.VERSION1_4,
            security_groups=[self.security_group, self.datalake.efs_sg],
            task_image_options=ecsp.ApplicationLoadBalancedTaskImageOptions(
                image=ecs.ContainerImage.from_docker_image_asset(
                    asset=self.sonarqube_svr_ecr),
                container_name='sonarqube-svr',
                container_port=9000,
                enable_logging=True,
                environment={
                    '_SONAR_JDBC_URL':
                    'jdbc:postgresql://{}/sonarqube'.format(
                        self.database.cluster_endpoint.hostname),
                    '_SONAR_JDBC_USERNAME':
                    '******',
                    '_SONAR_JDBC_PASSWORD':
                    '******'
                }))

        for name in ['AmazonElasticFileSystemClientFullAccess']:
            self.service.task_definition.task_role.add_managed_policy(
                iam.ManagedPolicy.from_aws_managed_policy_name(name))

        # Override container specific settings
        container = self.service.task_definition.default_container

        # Required to start remote sql
        container.add_ulimits(
            ecs.Ulimit(name=ecs.UlimitName.NOFILE,
                       soft_limit=262145,
                       hard_limit=262145))

        for folder in ['data', 'logs']:
            efs_ap = self.datalake.efs.add_access_point(
                'sonarqube-' + folder,
                create_acl=efs.Acl(owner_gid="0",
                                   owner_uid="0",
                                   permissions="777"),
                path='/sonarqube/' + folder)

            self.service.task_definition.add_volume(
                name=folder,
                efs_volume_configuration=ecs.EfsVolumeConfiguration(
                    file_system_id=self.datalake.efs.file_system_id,
                    transit_encryption='ENABLED',
                    authorization_config=ecs.AuthorizationConfig(
                        access_point_id=efs_ap.access_point_id,
                        iam='DISABLED')))

            container.add_mount_points(
                ecs.MountPoint(container_path='/opt/sonarqube/' + folder,
                               source_volume=folder,
                               read_only=False))
Example #12
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # add user 1 with secret manager password
        user1_pass = _secretsmanager.Secret(
            self, "user1Pass", description="Password for user 1", secret_name="user1_pass"
        )

        user1 = _iam.User(self, "user1", password=user1_pass.secret_value, user_name="user1")

        # add user 2 with literal password
        user2 = _iam.User(
            self, "user2", password=core.SecretValue("dont-use-bad-password@123"), user_name="user2"
        )

        # add user 2 to group
        group1 = _iam.Group(self, "group1Id", group_name="group1")
        group1.add_user(user2)

        # add managed policy to group
        group1.add_managed_policy(
            _iam.ManagedPolicy.from_aws_managed_policy_name("AmazonS3ReadOnlyAccess")
        )

        # add inline policy - specific resource
        param = _ssm.StringParameter(
            self,
            "parameterId",
            description="parameter",
            parameter_name="/foo",
            string_value="bar",
            tier=_ssm.ParameterTier.STANDARD,
        )
        param.grant_read(group1)

        # add inline policy - list all parameters in console
        group_statement1 = _iam.PolicyStatement(
            sid="DescribeAllParameters",
            effect=_iam.Effect.ALLOW,
            resources=["*"],
            actions=["ssm:DescribeParameters"],
        )
        group1.add_to_policy(group_statement1)

        # create iam role
        ops_role = _iam.Role(
            self,
            "opsRole",
            assumed_by=_iam.AccountPrincipal(f"{core.Aws.ACCOUNT_ID}"),
            role_name="ops_role",
        )
        list_ec2_policy = _iam.ManagedPolicy(
            self,
            "listEc2Instances",
            description="list ec2 instances in the account",
            managed_policy_name="list_ec2_policy",
            statements=[
                _iam.PolicyStatement(
                    effect=_iam.Effect.ALLOW,
                    actions=["ec2:Describe*", "cloudwatch:Describe*", "cloudwatch:Get*"],
                    resources=["*"],
                )
            ],
            roles=[ops_role],
        )

        # login url autogeneration
        output1 = core.CfnOutput(
            self,
            "user2LoginUrl",
            description="Login for user 2",
            value=f"https://{core.Aws.ACCOUNT_ID}.signin.aws.amazon.com/console",
        )
Example #13
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here

        #vpc = ec2.Vpc.from_lookup(self, 'VPC', is_default=True)
        
        vpc = ec2.Vpc(
            self, "MyVpc",
            max_azs=2
        )

        
        rdsInst = rds.DatabaseInstance(
            self, 'SpringPetclinicDB',
            engine=rds.DatabaseInstanceEngine.MYSQL,
            engine_version='5.7.31',
            instance_class=ec2.InstanceType('t2.medium'),
            master_username='******',
            database_name='petclinic',
            master_user_password=core.SecretValue('Welcome#123456'),
            vpc=vpc,
            deletion_protection=False,
            backup_retention=core.Duration.days(0),
            removal_policy=core.RemovalPolicy.DESTROY,
            # vpc_placement=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC)
        )

        rdsInst.connections.allow_default_port_from_any_ipv4()

        cluster = ecs.Cluster(
            self, 'EcsCluster',
            vpc=vpc
        )

        cluster.add_capacity("DefaultAutoScalingGroup",
                             instance_type=ec2.InstanceType('t2.large'),
                             vpc_subnets = ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
                             min_capacity = 6)

        alb = elbv2.ApplicationLoadBalancer(self, 'EcsLb', vpc=vpc, internet_facing=True)

        listener = alb.add_listener('EcsListener', port=80)

        listener.add_fixed_response('Default-Fix', status_code='404')
        listener.node.default_child.default_actions = [{
            "type": "fixed-response",
            "fixedResponseConfig": {"statusCode": "404"}
        }]

        for s in ['customers', 'vets', 'visits', 'static']:

            asset = ecr_assets.DockerImageAsset(self, 'spring-petclinic-' + s, 
              directory='./work/build/spring-petclinic-' + s + '-service',
              build_args={
                 'JAR_FILE': 'spring-petclinic-' + s + '-service-2.1.4.jar'
              })

            ecs_task = ecs.Ec2TaskDefinition(self, 'TaskDef-' + s)

            env = {}

            if s != 'static':
                env = {
                  'SPRING_DATASOURCE_PASSWORD': '******',
                  'SPRING_DATASOURCE_USERNAME': '******',
                  'SPRING_PROFILES_ACTIVE': 'mysql',
                  'SPRING_DATASOURCE_URL': 'jdbc:mysql://' + rdsInst.db_instance_endpoint_address + '/petclinic?useUnicode=true',
                  'SERVER_SERVLET_CONTEXT_PATH': '/api/' + s.rstrip('s')
                }

            ecs_container = ecs_task.add_container(
                'Container-' + s,
                memory_limit_mib=512,
                image=ecs.ContainerImage.from_docker_image_asset(asset),
                logging=ecs.LogDriver.aws_logs(stream_prefix=s),
                environment=env
            )

            ecs_container.add_port_mappings(ecs.PortMapping(container_port=8080))

            ecs_service = ecs.Ec2Service(
                self, 'Ec2Service-' + s,
                cluster=cluster,
                service_name='spring-petclinic-' + s,
                desired_count=2,
                task_definition=ecs_task
            )
            
            if s == 'static':
                pattern = '/*'
                priority = 1100
                check = {'path': '/'}
            else:
                pattern = '/api/' + s.rstrip('s') + '/*'
                priority = randint(1, 1000)
                check = {'path': '/api/' + s.rstrip('s') + '/manage'}

            target = listener.add_targets(
                'ECS-' + s,
                path_pattern=pattern,
                priority=priority,
                port=80,
                targets=[ecs_service],
                health_check=check
            )

        core.CfnOutput(self,"LoadBalancer",export_name="LoadBalancer",value=alb.load_balancer_dns_name)
Example #14
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 db_stack: DatabaseStack, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)
        # Enrichment Queue
        enrichment_queue = sqs.Queue(
            self,
            "CrawlerEnrichmentQueue",
            queue_name='CrawlerEnrichmentQueue',
            retention_period=cdk.Duration.days(1),
            visibility_timeout=cdk.Duration.minutes(15))

        # Environment
        env_default = {'APP_LOGGING_LEVEL': 'ERROR'}
        env_table = {'APP_OFFERS_TABLE': db_stack.offers_table.table_name}
        env_queue_url = {'APP_OFFERS_QUEUE_URL': enrichment_queue.queue_url}

        # Base Lambda ECR image asset
        lambda_asset = ecr_assets.DockerImageAsset(self,
                                                   'CrawlerLambdaImage',
                                                   directory=os.path.join(
                                                       os.getcwd(), 'src',
                                                       'crawler'),
                                                   repository_name='crawler')

        # Crawler Lambda
        lambda_crawler = self._lambda_function_from_asset(
            lambda_asset, 'LambdaCrawler', 'lambda_handler.crawler', {
                **env_default,
                **env_table,
                **env_queue_url
            })
        rule = events.Rule(self,
                           'CrawlerCallingRule',
                           rule_name='CrawlerCallingRule',
                           schedule=events.Schedule.rate(
                               cdk.Duration.hours(1)))
        rule.add_target(targets.LambdaFunction(lambda_crawler))
        db_stack.offers_table.grant_write_data(lambda_crawler)
        enrichment_queue.grant_send_messages(lambda_crawler)

        # Enrichment Lambda
        lambda_enrichment = self._lambda_function_from_asset(
            lambda_asset, 'LambdaEnrichment', 'lambda_handler.enrichment', {
                **env_default,
                **env_table
            })
        lambda_enrichment.add_event_source(
            lambda_event_sources.SqsEventSource(enrichment_queue))
        db_stack.offers_table.grant_write_data(lambda_enrichment)

        lambda_search = self._lambda_function_from_asset(
            lambda_asset,
            'LambdaSearch',
            'lambda_handler.search', {
                **env_default,
                **env_table
            },
            reserved_concurrent_executions=10,
            timeout_minutes=1,
            memory_size=128,
            max_event_age_minutes=1)
        db_stack.offers_table.grant_read_data(lambda_search)

        with open(os.path.join(str(Path.home()),
                               '.github/personal_token.txt')) as f:
            personal_token = f.read().strip()

        # Frontend entrypoint
        amplify_app = amplify.App(
            self,
            'CrawlerFrontend',
            app_name='CrawlerFrontend',
            auto_branch_creation=amplify.AutoBranchCreation(auto_build=True),
            source_code_provider=amplify.GitHubSourceCodeProvider(
                owner='jaswdr',
                repository='aws-cdk-crawler-frontend-example',
                oauth_token=cdk.SecretValue(personal_token)))

        # Backend entrypoint
        search_entrypoint = gateway.HttpApi(
            self,
            'CrawlerSearchApiEntrypoint',
            api_name='CrawlerSearchApiEntrypoint',
            cors_preflight=gateway.CorsPreflightOptions(
                allow_headers=['*'],
                allow_methods=[gateway.HttpMethod.GET],
                allow_origins=['*'],
                max_age=cdk.Duration.hours(2)),
            description='Crawler Search API Entrypoint')
        search_entrypoint.add_routes(
            path='/search',
            methods=[gateway.HttpMethod.GET],
            integration=gateway_integrations.LambdaProxyIntegration(
                handler=lambda_search,
                payload_format_version=gateway.PayloadFormatVersion.VERSION_2_0
            ))
        static_data_bucket = s3.Bucket(
            self,
            'CrawlerStaticDataBucket',
            versioned=True,
            removal_policy=cdk.RemovalPolicy.DESTROY,
            auto_delete_objects=True,
            bucket_name='crawler-static-data')
Example #15
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # super().__init__(scope, id, context, outdir)

        # Common Stack Tags
        for k, v in constants.COMMON_TAGS.items():
            core.Tag.add(self, key=k, value=v)

        # Hosted Zone
        if constants.HOSTED_ZONE["id"]:
            hosted_zone = aws_route53.HostedZone.from_hosted_zone_attributes(
                self,
                "ImportedHostedZone",
                hosted_zone_id=constants.HOSTED_ZONE["id"],
                zone_name=constants.HOSTED_ZONE["name"])
        else:
            hosted_zone = aws_route53.HostedZone(
                self,
                "MainHostedZone",
                zone_name=constants.HOSTED_ZONE["name"],
                comment="Hosted Zone for {}".format(
                    constants.HOSTED_ZONE["name"]),
            )

        # ACM Certificate
        if constants.CERTIFICATE["arn"]:
            acm_certificate = aws_certificatemanager.Certificate.from_certificate_arn(
                self,
                "ImportedCertificate",
                certificate_arn=constants.CERTIFICATE["arn"])
        else:
            acm_certificate = aws_certificatemanager.DnsValidatedCertificate(
                self,
                "CloudFrontCertificate",
                hosted_zone=hosted_zone,
                region=constants.CERTIFICATE["region"],
                domain_name=constants.HOSTED_ZONE["domain"],
                subject_alternative_names=constants.CERTIFICATE["alt_domains"],
                validation_method=aws_certificatemanager.ValidationMethod.DNS)
            acm_certificate.node.add_dependency(hosted_zone)

        # Website Bucket
        website_bucket = aws_s3.Bucket(
            self,
            "WebsiteBucket",
            encryption=aws_s3.BucketEncryption.S3_MANAGED,
            removal_policy=core.RemovalPolicy.DESTROY)

        # Cloudfront Origin Access Identity (OAI)
        website_bucket_oai = aws_cloudfront.CfnCloudFrontOriginAccessIdentity(
            self,
            "CloudfrontOAI",
            cloud_front_origin_access_identity_config=aws_cloudfront.
            CfnCloudFrontOriginAccessIdentity.
            CloudFrontOriginAccessIdentityConfigProperty(
                comment="CloudFrontOAIFor{}".format(
                    constants.PROJECT_CODE.capitalize())))

        # Canonical User Principal of OAI
        oai_canonical_user_principal = aws_iam.CanonicalUserPrincipal(
            website_bucket_oai.attr_s3_canonical_user_id)

        # Website Bucket Policy
        website_bucket.add_to_resource_policy(
            aws_iam.PolicyStatement(
                actions=["s3:GetObject"],
                resources=[website_bucket.arn_for_objects("*")],
                principals=[oai_canonical_user_principal],
                effect=aws_iam.Effect.ALLOW))

        # Adopt Lambda Function
        lambda_function = aws_lambda.Function.from_function_arn(
            self,
            "UrlRewriteFunction",
            function_arn=constants.URL_REWRITE_FUNCTION_ARN)

        lambda_function_version_arn = aws_lambda.Version.from_version_arn(
            self,
            "LambdaFunctionArn",
            version_arn=constants.URL_REWRITE_FUNCTION_VERSION_ARN)

        # CloudFront Web Distribution
        cloudfront_distribution = aws_cloudfront.CloudFrontWebDistribution(
            self,
            "CloudFrontDistribution",
            comment="waqqas.tech",
            default_root_object="index.html",
            viewer_protocol_policy=aws_cloudfront.ViewerProtocolPolicy.
            REDIRECT_TO_HTTPS,
            alias_configuration=aws_cloudfront.AliasConfiguration(
                acm_cert_ref=acm_certificate.certificate_arn,
                security_policy=aws_cloudfront.SecurityPolicyProtocol.
                TLS_V1_2_2018,
                names=constants.CLOUDFRONT["alt_domains"]),
            origin_configs=[
                aws_cloudfront.SourceConfiguration(
                    s3_origin_source=aws_cloudfront.S3OriginConfig(
                        s3_bucket_source=website_bucket,
                        origin_access_identity_id=website_bucket_oai.ref),
                    behaviors=[
                        aws_cloudfront.Behavior(
                            allowed_methods=aws_cloudfront.
                            CloudFrontAllowedMethods.GET_HEAD_OPTIONS,
                            cached_methods=aws_cloudfront.
                            CloudFrontAllowedCachedMethods.GET_HEAD,
                            compress=True,
                            is_default_behavior=True,
                            path_pattern="*",
                            default_ttl=core.Duration.seconds(
                                amount=constants.CLOUDFRONT['default_ttl']),
                            lambda_function_associations=[
                                aws_cloudfront.LambdaFunctionAssociation(
                                    event_type=aws_cloudfront.
                                    LambdaEdgeEventType.ORIGIN_REQUEST,
                                    lambda_function=lambda_function_version_arn
                                )
                            ])
                    ])
            ])

        # CloudFront Route53 Record
        primary_dns_record = aws_route53.ARecord(
            self,
            "PrimaryDNSRecord",
            zone=hosted_zone,
            comment="{} CloudFront Dist Alias Record".format(
                constants.PROJECT_CODE),
            record_name="{}.".format(constants.HOSTED_ZONE["domain"]),
            target=aws_route53.RecordTarget.from_alias(
                aws_route53_targets.CloudFrontTarget(cloudfront_distribution)),
            ttl=core.Duration.seconds(
                amount=constants.CLOUDFRONT["default_ttl"]),
        )

        # Artifact Bucket
        artifact_bucket = aws_s3.Bucket(
            self,
            "ArtifactBucket",
            encryption=aws_s3.BucketEncryption.S3_MANAGED,
            removal_policy=core.RemovalPolicy.DESTROY)

        # CodeBuild
        codebuild_environment_variables = aws_codebuild.BuildEnvironmentVariable(
            value=website_bucket.bucket_name)
        codebuild_environment = aws_codebuild.BuildEnvironment(
            build_image=aws_codebuild.LinuxBuildImage.
            UBUNTU_14_04_PYTHON_3_7_1,
            compute_type=aws_codebuild.ComputeType.SMALL)
        codebuild_buildspec = aws_codebuild.BuildSpec.from_object(
            value=buildspec.BUILDSPEC)
        codebuild_project = aws_codebuild.PipelineProject(
            self,
            "CodeBuildProject",
            environment_variables={
                "BUCKET_NAME": codebuild_environment_variables
            },
            environment=codebuild_environment,
            build_spec=codebuild_buildspec,
            description="CodeBuild Project for {} Content".format(
                constants.PROJECT_CODE),
            timeout=core.Duration.seconds(amount=300))
        # TODO: Lock down permissions for buckets
        codebuild_project.add_to_role_policy(
            aws_iam.PolicyStatement(actions=["s3:*"],
                                    effect=aws_iam.Effect.ALLOW,
                                    resources=[
                                        website_bucket.arn_for_objects("*"),
                                        artifact_bucket.arn_for_objects("*"),
                                        website_bucket.bucket_arn,
                                        artifact_bucket.bucket_arn,
                                    ]))
        # Codepipeline
        codepipeline = aws_codepipeline.Pipeline(
            self,
            "CodePipelineWebsiteContent",
            artifact_bucket=artifact_bucket,
            stages=[
                aws_codepipeline.StageProps(
                    stage_name="Source",
                    actions=[
                        aws_codepipeline_actions.GitHubSourceAction(
                            oauth_token=core.SecretValue(
                                value=constants.GITHUB_OAUTH_TOKEN),
                            output=aws_codepipeline.Artifact(
                                artifact_name="source"),
                            owner=constants.GITHUB_USER_NAME,
                            repo=constants.GITHUB_REPO_NAME,
                            branch=constants.BRANCH_NAME,
                            action_name="GithubSource",
                            trigger=aws_codepipeline_actions.GitHubTrigger.
                            WEBHOOK)
                    ]),
                aws_codepipeline.StageProps(
                    stage_name="Build",
                    actions=[
                        aws_codepipeline_actions.CodeBuildAction(
                            input=aws_codepipeline.Artifact(
                                artifact_name="source"),
                            project=codebuild_project,
                            type=aws_codepipeline_actions.CodeBuildActionType.
                            BUILD,
                            action_name="HugoBuild")
                    ])
            ])
        # TODO: Lock down permissions for buckets
        codepipeline.add_to_role_policy(
            aws_iam.PolicyStatement(actions=["s3:*"],
                                    effect=aws_iam.Effect.ALLOW,
                                    resources=[
                                        website_bucket.arn_for_objects("*"),
                                        artifact_bucket.arn_for_objects("*"),
                                        website_bucket.bucket_arn,
                                        artifact_bucket.bucket_arn,
                                    ]))