def create_hosted_zone(self, logical_id, resource_name, **kwargs):
    '''
    Create a Hosted Zone using the aws_cdk aws_route53.HostedZone construct
    '''
    return aws_route53.HostedZone(self,
                                  logical_id,
                                  zone_name=kwargs['zone_name'])
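A hedged usage sketch for the helper above. The enclosing Stack class and the aws_route53 import are assumed (the excerpt shows only the method), and "example.com" is a placeholder zone name.

from aws_cdk import core, aws_route53


class DnsStack(core.Stack):
    # Hypothetical host class: the original excerpt shows only the
    # create_hosted_zone method, so everything else here is scaffolding.
    def create_hosted_zone(self, logical_id, resource_name, **kwargs):
        return aws_route53.HostedZone(self,
                                      logical_id,
                                      zone_name=kwargs['zone_name'])

    def __init__(self, scope, construct_id, **kwargs):
        super().__init__(scope, construct_id, **kwargs)
        # The helper is called with the zone name as a keyword argument.
        self.create_hosted_zone('HostedZone', 'my-zone',
                                zone_name='example.com')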
Example #2
def create_hosted_zone(scope: InfraStack) -> route53.HostedZone:
    domain = scope.context.domain_name
    hosted_zone = route53.HostedZone(
        scope,
        id=domain,
        zone_name=domain,
    )

    route53.MxRecord(
        scope,
        scope.context.construct_id("MX-Gmail-1"),
        values=[
            route53.MxRecordValue(host_name='ASPMX.L.GOOGLE.COM', priority=1),
            route53.MxRecordValue(host_name='ALT1.ASPMX.L.GOOGLE.COM',
                                  priority=5),
            route53.MxRecordValue(host_name='ALT2.ASPMX.L.GOOGLE.COM',
                                  priority=5),
            route53.MxRecordValue(host_name='ALT3.ASPMX.L.GOOGLE.COM',
                                  priority=10),
            route53.MxRecordValue(host_name='ALT4.ASPMX.L.GOOGLE.COM',
                                  priority=10),
        ],
        zone=hosted_zone,
        ttl=Duration.seconds(3600),
    )

    return hosted_zone
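InfraStack and its context object are not part of this excerpt. A minimal sketch of what they might look like so the function above type-checks; the field and method names are inferred from the calls above, not taken from the real project.

from dataclasses import dataclass

from aws_cdk import core


@dataclass
class Context:
    domain_name: str

    def construct_id(self, suffix: str) -> str:
        # Derive unique construct IDs from the domain,
        # e.g. "example.com-MX-Gmail-1".
        return f"{self.domain_name}-{suffix}"


class InfraStack(core.Stack):
    def __init__(self, scope, construct_id, *, domain_name: str, **kwargs):
        super().__init__(scope, construct_id, **kwargs)
        self.context = Context(domain_name=domain_name)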
Example #3
    def __init__(self, scope: core.Construct, id: str, cdnid,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        prj_name = self.node.try_get_context("project_name")
        env_name = self.node.try_get_context("env")

        hosted_zone = r53.HostedZone(self,
                                     'hosted-zone',
                                     zone_name='cloudevangelist.ca')

        r53.ARecord(self,
                    'cdn-record',
                    zone=hosted_zone,
                    target=r53.RecordTarget.from_alias(
                        alias_target=r53target.CloudFrontTarget(cdnid)),
                    record_name='app')

        ssm.StringParameter(self,
                            'zone-id',
                            parameter_name='/' + env_name + '/zone-id',
                            string_value=hosted_zone.hosted_zone_id)
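A hedged follow-on that is not in the original: because the zone id is published to SSM, a second stack can read it back and import the zone without a cross-stack reference. The r53/ssm aliases and the ConsumerStack name are assumptions.

from aws_cdk import core, aws_route53 as r53, aws_ssm as ssm


class ConsumerStack(core.Stack):
    def __init__(self, scope, id, env_name: str, **kwargs):
        super().__init__(scope, id, **kwargs)
        # Resolve the '/env/zone-id' parameter written by the stack above.
        zone_id = ssm.StringParameter.value_for_string_parameter(
            self, '/' + env_name + '/zone-id')
        r53.HostedZone.from_hosted_zone_attributes(
            self,
            'imported-zone',
            hosted_zone_id=zone_id,
            zone_name='cloudevangelist.ca')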
Example #4
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Read CDK context values from cdk.json
        _vpcID = self.node.try_get_context("VpcId")
        _sm_password = self.node.try_get_context("Secret_domain_password_arn")
        _dname = self.node.try_get_context("Domain_name")
        _subnet1 = self.node.try_get_context("Subnet1")
        _subnet2 = self.node.try_get_context("Subnet2")
        _sm_ec2keypair = self.node.try_get_context("Secret_keypair_arn")
        _ec2instance = self.node.try_get_context("Instance_type")

        # Import the existing VPC from the AWS account
        Vpc = _ec2.Vpc.from_lookup(self, "ImportVPC", vpc_id=_vpcID)
        Subnet1 = _ec2.Subnet.from_subnet_attributes(
            self,
            "subnetfromADManagedAD",
            subnet_id=_subnet1[0],
            availability_zone=_subnet1[1])

        # Import a Secrets Manager secret for the domain password
        secret_adpassword = _sm.Secret.from_secret_arn(self,
                                                       "AdPasswordSecretStore",
                                                       secret_arn=_sm_password)

        # Import a Secrets Manager secret for the EC2 key pair
        secret_ec2keypair = _sm.Secret.from_secret_arn(
            self, "ImportEC2KeyPairSecretStore", secret_arn=_sm_ec2keypair)

        # Create an AWS Managed Microsoft AD (Standard edition)
        ad = _ds.CfnMicrosoftAD(
            self,
            "ManagedAD",
            name=_dname,
            password=secret_adpassword.secret_value_from_json(
                "Key").to_string(),
            edition="Standard",
            vpc_settings={
                "vpcId": _vpcID,
                "subnetIds": [_subnet1[0], _subnet2[0]]
            })

        self.directory = ad

        # Create a Route 53 private hosted zone for the AD domain name
        hostedzone = _r53.HostedZone(self,
                                     "HostedZoneforAD",
                                     zone_name=_dname,
                                     vpcs=[Vpc])

        # Get the DNS IPs from AWS Managed AD
        targetip = _r53.RecordTarget(values=ad.attr_dns_ip_addresses)

        # Create an A record in Route 53 pointing at the AWS Managed AD DNS IPs so EC2 instances can later join the domain
        r53Arecord = _r53.ARecord(self,
                                  "RecordAforAD",
                                  target=targetip,
                                  zone=hostedzone)

        # Create the policy for the EC2JoinDomain role
        ec2ssmpolicy = _iam.PolicyDocument(statements=[
            _iam.PolicyStatement(actions=[
                "ssm:DescribeAssociation", "ssm:GetDocument",
                "ssm:DescribeDocument", "ssm:GetManifest", "ssm:GetParameters",
                "ssm:ListAssociations", "ssm:ListInstanceAssociations",
                "ssm:UpdateAssociationStatus",
                "ssm:UpdateInstanceAssociationStatus",
                "ssm:UpdateInstanceInformation"
            ],
                                 resources=["*"]),
            _iam.PolicyStatement(actions=[
                "ssmmessages:CreateControlChannel",
                "ssmmessages:CreateDataChannel",
                "ssmmessages:OpenControlChannel", "ssmmessages:OpenDataChannel"
            ],
                                 resources=["*"]),
            _iam.PolicyStatement(actions=[
                "ec2messages:AcknowledgeMessage", "ec2messages:DeleteMessage",
                "ec2messages:FailMessage", "ec2messages:GetEndpoint",
                "ec2messages:GetMessages", "ec2messages:SendReply"
            ],
                                 resources=["*"]),
            _iam.PolicyStatement(actions=["ec2:DescribeInstanceStatus"],
                                 resources=["*"]),
            _iam.PolicyStatement(actions=["secretsmanager:GetSecretValue"],
                                 resources=["{}".format(_sm_password)])
        ])

        # Create the "EC2JoinDomain" role to attach to the Windows EC2 instance
        ssmrole = _iam.Role(
            self,
            "SSMRoleforEC2",
            assumed_by=_iam.ServicePrincipal('ec2.amazonaws.com'),
            inline_policies={"EC2SSMPolicy": ec2ssmpolicy},
            role_name="EC2JoinDomain")

        # Create the policy for the workspaces_DefaultRole role
        wsdefaultpolicy = _iam.PolicyDocument(statements=[
            _iam.PolicyStatement(actions=[
                "ec2:CreateNetworkInterface", "ec2:DeleteNetworkInterface",
                "ec2:DescribeNetworkInterfaces"
            ],
                                 resources=["*"]),
            _iam.PolicyStatement(actions=[
                "workspaces:RebootWorkspaces", "workspaces:RebuildWorkspaces",
                "workspaces:ModifyWorkspaceProperties"
            ],
                                 resources=["*"])
        ])

        # Create role workspaces_DefaultRole for later WorkSpaces API usage
        wsrole = _iam.Role(
            self,
            "WorkSpacesDefaultRole",
            assumed_by=_iam.ServicePrincipal('workspaces.amazonaws.com'),
            inline_policies={"WorkSpacesDefaultPolicy": wsdefaultpolicy},
            role_name="workspaces_DefaultRole")

        # Create a security group for RDP access on Windows EC2JoinDomain (EC2)
        rdpsg = _ec2.SecurityGroup(
            self,
            "SGForRDP",
            vpc=Vpc,
            description=
            "Security group for RDP access from the local environment to the Windows EC2 instance"
        )

        rdpsg.add_ingress_rule(peer=_ec2.Peer.ipv4("192.168.1.1/32"),
                               connection=_ec2.Port.tcp(3389))

        # Create the Windows EC2 instance used as the AD admin server
        adadminEC2 = _ec2.Instance(
            self,
            "WindowsEC2",
            instance_type=_ec2.InstanceType(
                instance_type_identifier=_ec2instance),
            machine_image=_ec2.MachineImage.latest_windows(
                version=_ec2.WindowsVersion.
                WINDOWS_SERVER_2016_ENGLISH_FULL_BASE),
            vpc=Vpc,
            key_name=secret_ec2keypair.secret_value_from_json(
                "Key").to_string(),
            role=ssmrole,
            security_group=rdpsg,
            vpc_subnets=_ec2.SubnetSelection(subnets=[Subnet1]))

        adadminEC2.instance.add_depends_on(ad)

        # Create an SSM parameter for the domain name
        domain = _ssm.StringParameter(self,
                                      "ADDomainName",
                                      parameter_name="ad_join_domain_name",
                                      string_value=ad.name)

        # Create an SSM parameter for the domain user
        aduser = _ssm.StringParameter(self,
                                      "ADDomainUser",
                                      parameter_name="ad_join_domain_user",
                                      string_value="Admin")

        domain.node.add_dependency(ad)
        aduser.node.add_dependency(ad)

        # Create the SSM document that joins the Windows EC2 instance to AD
        ssmdocument = _ssm.CfnDocument(
            self,
            "SSMDocumentJoinAD",
            document_type="Command",
            name="SSMDocumentJoinAD",
            content={
                "description":
                "Run a PowerShell script to domain join a Windows instance securely",
                "schemaVersion":
                "2.0",
                "mainSteps": [{
                    "action": "aws:runPowerShellScript",
                    "name": "runPowerShellWithSecureString",
                    "inputs": {
                        "runCommand": [
                            "# Example PowerShell script to domain join a Windows instance securely",
                            "# Adopt the document from AWS Blog Join a Microsoft Active Directory Domain with Parameter Store and Amazon EC2 Systems Manager Documents",
                            "", "$ErrorActionPreference = 'Stop'", "", "try{",
                            "    # Parameter names",
                            "    # $dnsParameterStore = ([System.Net.Dns]::GetHostAddresses({}).IPAddressToString[0])"
                            .format(domain.parameter_name),
                            "    $domainNameParameterStore = \"{}\"".format(
                                domain.parameter_name),
                            "    $domainJoinUserNameParameterStore = \"{}\"".
                            format(aduser.parameter_name),
                            "    $domainJoinPasswordParameterStore = \"{}\"".
                            format(secret_adpassword.secret_arn), "",
                            "    # Retrieve configuration values from parameters",
                            "    $ipdns = ([System.Net.Dns]::GetHostAddresses(\"{}\").IPAddressToString[0])"
                            .format(_dname),
                            "    $domain = (Get-SSMParameterValue -Name $domainNameParameterStore).Parameters[0].Value",
                            "    $username = $domain + \"\\\" + (Get-SSMParameterValue -Name $domainJoinUserNameParameterStore).Parameters[0].Value",
                            "    $password = ((Get-SECSecretValue -SecretId $domainJoinPasswordParameterStore ).SecretString | ConvertFrom-Json ).Key | ConvertTo-SecureString -asPlainText -Force ",
                            "",
                            "    # Create a System.Management.Automation.PSCredential object",
                            "    $credential = New-Object System.Management.Automation.PSCredential($username, $password)",
                            "",
                            "    # Determine the name of the Network Adapter of this machine",
                            "    $networkAdapter = Get-WmiObject Win32_NetworkAdapter -Filter \"AdapterType = 'Ethernet 802.3'\"",
                            "    $networkAdapterName = ($networkAdapter | Select-Object -First 1).NetConnectionID",
                            "",
                            "    # Set up the IPv4 address of the AD DNS server as the first DNS server on this machine",
                            "    netsh.exe interface ipv4 add dnsservers name=$networkAdapterName address=$ipdns index=1",
                            "", "    # Join the domain and reboot",
                            "    Add-Computer -DomainName $domain -Credential $credential",
                            "    Restart-Computer -Force", "}",
                            "catch [Exception]{",
                            "    Write-Host $_.Exception.ToString()",
                            "    Write-Host 'Command execution failed.'",
                            "    $host.SetShouldExit(1)", "}"
                        ]
                    }
                }]
            })

        # Create an SSM association that runs the document so the Windows EC2 instance joins the domain
        ssmjoinad = _ssm.CfnAssociation(self,
                                        "WindowJoinAD",
                                        name=ssmdocument.name,
                                        targets=[{
                                            "key":
                                            "InstanceIds",
                                            "values": [adadminEC2.instance_id]
                                        }])

        ssmjoinad.add_depends_on(ssmdocument)

        # Create a Policy for Lambda Role
        lambdapolicy = _iam.PolicyDocument(statements=[
            _iam.PolicyStatement(actions=["logs:CreateLogGroup"],
                                 resources=[
                                     "arn:aws:logs:{}:{}:*".format(
                                         self.region, self.account)
                                 ]),
            _iam.PolicyStatement(
                actions=["logs:CreateLogStream", "logs:PutLogEvents"],
                resources=[
                    "arn:aws:logs:{}:{}:log-group:/aws/lambda/*".format(
                        self.region, self.account)
                ]),
            _iam.PolicyStatement(actions=[
                "workspaces:RegisterWorkspaceDirectory",
                "workspaces:DeregisterWorkspaceDirectory",
                "ds:DescribeDirectories", "ds:AuthorizeApplication",
                "ds:UnauthorizeApplication", "iam:GetRole",
                "ec2:DescribeInternetGateways", "ec2:DescribeVpcs",
                "ec2:DescribeRouteTables", "ec2:DescribeSubnets",
                "ec2:DescribeNetworkInterfaces",
                "ec2:DescribeAvailabilityZones", "ec2:CreateSecurityGroup",
                "ec2:CreateTags"
            ],
                                 resources=["*"])
        ])

        # Create an IAM role for Lambda
        lambdarole = _iam.Role(
            self,
            "LambdaRoleForRegisterDS",
            assumed_by=_iam.ServicePrincipal('lambda.amazonaws.com'),
            inline_policies={"LambdaActicateDS": lambdapolicy},
            role_name="LambdaActivateDirectoryService")

        # Create a Lambda function to register the directory with WorkSpaces
        dslambda = _lambda.Function(self,
                                    "LambdaStackForDSFunction",
                                    runtime=_lambda.Runtime.PYTHON_3_7,
                                    handler="workspaceds.handler",
                                    role=lambdarole,
                                    code=_lambda.Code.asset('lambda'),
                                    environment={"DIRECTORY_ID": ad.ref},
                                    timeout=core.Duration.seconds(120))
        # Create a custom resource that invokes the Lambda function once it is created
        _cf.CfnCustomResource(self,
                              "InvokeLambdaFunction",
                              service_token=dslambda.function_arn)
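The code for lambda/workspaceds.py ships as an asset and is not shown. A minimal sketch of what the handler plausibly does, registering the directory with WorkSpaces and answering the raw CloudFormation custom-resource callback; the exact logic and error handling are assumptions.

import json
import os
import urllib.request

import boto3


def handler(event, context):
    # Hypothetical reconstruction of workspaceds.handler; only DIRECTORY_ID
    # (set by the stack above) is known to exist.
    status = 'SUCCESS'
    try:
        if event.get('RequestType') != 'Delete':
            boto3.client('workspaces').register_workspace_directory(
                DirectoryId=os.environ['DIRECTORY_ID'],
                EnableWorkDocs=False)
    except Exception:
        status = 'FAILED'
    # CfnCustomResource invokes the function directly, so the handler must
    # PUT the result to the pre-signed ResponseURL itself.
    body = json.dumps({
        'Status': status,
        'Reason': 'See CloudWatch logs',
        'PhysicalResourceId': os.environ['DIRECTORY_ID'],
        'StackId': event['StackId'],
        'RequestId': event['RequestId'],
        'LogicalResourceId': event['LogicalResourceId'],
    }).encode()
    req = urllib.request.Request(event['ResponseURL'], data=body,
                                 method='PUT')
    urllib.request.urlopen(req)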
Example #5
    def __init__(self, scope: core.Construct, id: str, region, domain,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # VPC: we need one for the ECS cluster (sadly)
        vpc = ec2.Vpc.from_lookup(self, 'vpc', is_default=True)

        cluster = ecs.Cluster(self, 'Cluster', vpc=vpc)

        # Route53 & SSL Certificate
        zone = dns.HostedZone(self, "dns", zone_name=domain)

        dns.ARecord(self,
                    'MinecraftRecord',
                    zone=zone,
                    record_name='minecraft',
                    target=dns.RecordTarget(values=['1.2.3.4']))

        cert = acm.Certificate(
            self,
            'cert',
            domain_name=f'*.{domain}',
            validation=acm.CertificateValidation.from_dns(zone))

        # ECS (Cluster, EFS, Task Def)
        fs = efs.FileSystem(self,
                            'EFS',
                            vpc=vpc,
                            removal_policy=core.RemovalPolicy.DESTROY)

        task_definition = ecs.FargateTaskDefinition(self,
                                                    'TaskDef',
                                                    memory_limit_mib=4096,
                                                    cpu=1024)

        container = task_definition.add_container(
            'MinecraftDocker',
            image=ecs.ContainerImage.from_registry('darevee/minecraft-aws'),
            logging=ecs.AwsLogDriver(stream_prefix='Minecraft'),
            cpu=1024,
            memory_limit_mib=4096)
        container.add_mount_points(
            ecs.MountPoint(container_path='/minecraft',
                           source_volume='efs',
                           read_only=False))
        cfn_task = container.task_definition.node.default_child
        cfn_task.add_property_override("Volumes", [{
            "EFSVolumeConfiguration": {
                "FilesystemId": fs.file_system_id
            },
            "Name": "efs"
        }])

        container.add_port_mappings(ecs.PortMapping(container_port=25565))

        sg = ec2.SecurityGroup(self, 'sg', vpc=vpc)
        sg.add_ingress_rule(peer=ec2.Peer.any_ipv4(),
                            connection=ec2.Port.tcp(25565),
                            description='Minecraft Access')
        sg.add_ingress_rule(peer=ec2.Peer.any_ipv4(),
                            connection=ec2.Port.tcp(25575),
                            description='RCON Access')

        fs.connections.allow_default_port_from(sg)

        subnets = ",".join(vpc.select_subnets().subnet_ids)

        # Cognito (for ApiGW authentication)
        userpool = cognito.UserPool(
            self,
            'UserPool',
            user_invitation=cognito.UserInvitationConfig(
                email_body=
                """No cześć {username}, zostałeś zaproszony do naszego Minecraft!
                Twoje tymczasowe hasło to {####}
                """,
                email_subject="Zaproszenie do minecrafta"))

        # APIGW (Gateway, Lambdas, S3 Static content)

        # Lambda Starter
        starter = _lambda.Function(self,
                                   'Starter',
                                   runtime=_lambda.Runtime.PYTHON_3_8,
                                   handler='index.lambda_handler',
                                   code=_lambda.Code.asset('lambda/starter'),
                                   timeout=core.Duration.seconds(300),
                                   environment={
                                       'cluster': cluster.cluster_name,
                                       'subnets': subnets,
                                       'security_groups': sg.security_group_id,
                                       'task_definition':
                                       task_definition.task_definition_arn,
                                       'region': region,
                                       'zone_id': zone.hosted_zone_id,
                                       'domain': domain
                                   })

        starter.add_to_role_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                resources=["*"],
                                actions=[
                                    "ecs:ListTasks", "ecs:DescribeTasks",
                                    "ec2:DescribeNetworkInterfaces"
                                ]))
        starter.add_to_role_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                resources=[task_definition.task_definition_arn],
                actions=["ecs:RunTask", "ecs:DescribeTasks"]))
        starter.add_to_role_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                resources=[
                                    task_definition.task_role.role_arn,
                                    task_definition.execution_role.role_arn
                                ],
                                actions=["iam:PassRole"]))

        starter.add_to_role_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                resources=[zone.hosted_zone_arn],
                                actions=["route53:ChangeResourceRecordSets"]))

        # S3 static webpage
        bucket = s3.Bucket(self,
                           "S3WWW",
                           public_read_access=True,
                           removal_policy=core.RemovalPolicy.DESTROY,
                           website_index_document="index.html")
        s3d.BucketDeployment(self,
                             "S3Deploy",
                             destination_bucket=bucket,
                             sources=[s3d.Source.asset("static_page")])

        status = _lambda.Function(self,
                                  'Status',
                                  runtime=_lambda.Runtime.PYTHON_3_8,
                                  handler='index.lambda_handler',
                                  code=_lambda.Code.asset('lambda/status'),
                                  environment={
                                      'url': f"https://minecrafter.{domain}",
                                      'domain': domain
                                  })

        # ApiGW
        apigw = api.LambdaRestApi(self,
                                  'ApiGW',
                                  handler=status,
                                  proxy=False,
                                  domain_name={
                                      "domain_name": f'minecrafter.{domain}',
                                      "certificate": cert
                                  },
                                  default_cors_preflight_options={
                                      "allow_origins": api.Cors.ALL_ORIGINS,
                                      "allow_methods": api.Cors.ALL_METHODS
                                  })

        start = apigw.root.add_resource('start')
        start.add_method('ANY', integration=api.LambdaIntegration(starter))

        apigw.root.add_method('ANY')

        dns.ARecord(self,
                    'PointDNSToApiGW',
                    zone=zone,
                    target=dns.RecordTarget.from_alias(
                        targets.ApiGateway(apigw)),
                    record_name=f"minecrafter.{domain}")
Example #6
    def __init__(self, scope: core.Construct, id: str, domain: DomainProps,
                 website: WebsiteConstruct, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        zone = route53.HostedZone(self,
                                  'Zone',
                                  zone_name=domain.domain_name,
                                  comment='Managed by CDK')

        root_record_name = domain.domain_name
        wildcard_record_name = '*.{}'.format(root_record_name)

        website_target = route53.AddressRecordTarget.from_alias(
            alias_target=route53_targets.CloudFrontTarget(
                website.distribution))

        route53.ARecord(
            self,
            'RootIpv4',
            record_name=root_record_name,
            zone=zone,
            target=website_target,
        )

        route53.AaaaRecord(
            self,
            'RootIpv6',
            record_name=root_record_name,
            zone=zone,
            target=website_target,
        )

        route53.ARecord(
            self,
            'WildcardIpv4',
            record_name=wildcard_record_name,
            zone=zone,
            target=website_target,
        )

        route53.AaaaRecord(
            self,
            'WildcardIpv6',
            record_name=wildcard_record_name,
            zone=zone,
            target=website_target,
        )

        route53.MxRecord(
            self,
            'Email',
            record_name=root_record_name,
            zone=zone,
            values=domain.mx_record_values,
        )

        route53.TxtRecord(
            self,
            'Text',
            record_name=root_record_name,
            zone=zone,
            values=domain.txt_record_values,
        )
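DomainProps and WebsiteConstruct are project types that are not part of this excerpt. A minimal sketch of a DomainProps shape the constructor above could accept, inferred from the attributes it reads.

from dataclasses import dataclass, field
from typing import List

from aws_cdk import aws_route53 as route53


@dataclass
class DomainProps:
    # Only the fields used above are modeled here.
    domain_name: str
    mx_record_values: List[route53.MxRecordValue] = field(
        default_factory=list)
    txt_record_values: List[str] = field(default_factory=list)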
Example #7
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        branch = kwargs['env']['branch']
        # Create an S3 bucket for CodePipeline artifacts
        s3_bucket = S3_Bucket(self, 's3-' + branch)

        # Create IAM roles for git merge and CodeBuild
        roles = Roles(self, 'roles-' + branch)

        # Define New Codepipeline
        pipeline = ServicePipeline(self,
                                   'pipeline-' + branch,
                                   bucket=s3_bucket.ArtifactBucket)

        # Create GitHub source
        github = GitHub(self,
                        'GitHubSource-' + branch,
                        pipeline=pipeline.pipeline)

        # Create ECR Repo
        ecr_repo = ECRRepo(
            self,
            'ECRRepo-' + branch,
        )

        # Create CodeBuild
        GitCodeBuild(
            self,
            'CodeBuild-' + branch,
            source=github.sourceOutput,
            pipeline=pipeline.pipeline,
            bucket=s3_bucket.ArtifactBucket,
            role=roles.CodeBuildServiceRole,
            frontend=ecr_repo.flask.repository_uri,
        )

        # Create VPC for the ECS cluster
        vpc = ec2.Vpc(
            self,
            "MyVPC-" + branch,
            max_azs=2,
        )

        # Create ECS cluster
        cluster = ecs.Cluster(
            self,
            'EC2-Cluster-' + branch,
            vpc=vpc,
        )

        # Add Auto Scaling Group
        for i in range(3):
            cluster.add_capacity(
                "DefaultAutoScalingGroup-" + str(i) + '-' + branch,
                instance_type=ec2.InstanceType("t2.medium"),
                allow_all_outbound=True,
                key_name=os.environ['KEYNAME'],
                # vpc_subnets=vpc.public_subnets
            )

        if branch == 'master':
            # Add HostedZone
            hosted_zone = route53.HostedZone(
                self,
                'hosted-zone-' + branch,
                # hosted_zone_id='Z3HNUDRBTJMWFV',
                zone_name='wiki-trend.com')
            domain_name = 'wiki-trend.com'
        else:
            hosted_zone = None
            domain_name = None

        # Add Load Balancer
        ecs_service = ecs_patterns.LoadBalancedEc2Service(
            self,
            'Ec2Service-' + branch,
            cluster=cluster,
            # service_name='Frontend-1-'+branch,
            memory_limit_mib=2048,
            container_port=80,
            environment={
                "PORT": '80',
                'NEO4J_USER': os.environ['NEO4J_USER'],
                'NEO4J_PASSWORD': os.environ['NEO4J_PASSWORD']
            },
            domain_name=domain_name,
            domain_zone=hosted_zone,
            # image=ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample"),
            image=ecs.ContainerImage.from_ecr_repository(ecr_repo.flask),
            public_load_balancer=True,
        )

        core.CfnOutput(
            self,
            'ECRRepoURI-' + branch,
            description="The URI of the ECR Repo for flask frontend",
            value=ecr_repo.flask.repository_uri)

        core.CfnOutput(
            self,
            "CodePipelinURL-" + branch,
            description="The URL of the created Pipeline",
            value=
            "https://{}.console.aws.amazon.com/codepipeline/home?region={}#/view/{}"
            .format(os.environ['AWS_REGION'], os.environ['AWS_REGION'],
                    pipeline.pipeline.pipeline_name))

        core.CfnOutput(self,
                       "LoadBalancerDNS-" + branch,
                       value=ecs_service.load_balancer.load_balancer_dns_name)
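A hedged sketch of the app wiring this stack appears to expect: it reads kwargs['env']['branch'], which suggests a plain dict is passed in the slot CDK normally reserves for core.Environment. PipelineStack is a placeholder name, and whether Stack tolerates the extra key depends on the CDK version in use.

from aws_cdk import core

app = core.App()
# PipelineStack stands in for the class whose __init__ is shown above.
PipelineStack(app, 'pipeline-master', env={'branch': 'master'})
app.synth()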
Example #8
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        ###########################################################################
        # AWS LAMBDA FUNCTIONS
        ###########################################################################
        parse_image_list_file = aws_lambda.Function(
            self,
            'parse_image_list_file',
            handler='parse_image_list_file.lambda_handler',
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=aws_lambda.Code.asset('parse_image_list_file'),
            memory_size=10240,
            timeout=core.Duration.seconds(300),
            log_retention=aws_logs.RetentionDays.ONE_DAY)

        list_objects = aws_lambda.Function(
            self,
            'list_objects',
            handler='list_objects.lambda_handler',
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=aws_lambda.Code.asset('list_objects'),
            memory_size=4096,
            timeout=core.Duration.seconds(300),
            log_retention=aws_logs.RetentionDays.ONE_DAY)

        get_size_and_store = aws_lambda.Function(
            self,
            'get_size_and_store',
            handler='get_size_and_store.lambda_handler',
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=aws_lambda.Code.asset('get_size_and_store'),
            memory_size=4096,
            timeout=core.Duration.seconds(300),
            log_retention=aws_logs.RetentionDays.ONE_DAY)

        ###########################################################################
        # AMAZON S3 BUCKETS
        ###########################################################################
        images_bucket = aws_s3.Bucket(self, "images_bucket")

        ###########################################################################
        # LAMBDA SUPPLEMENTAL POLICIES
        ###########################################################################
        lambda_supplemental_policy_statement = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=["s3:Get*", "s3:Head*", "s3:List*", "sqs:*", "es:*"],
            resources=["*"])

        parse_image_list_file.add_to_role_policy(
            lambda_supplemental_policy_statement)
        list_objects.add_to_role_policy(lambda_supplemental_policy_statement)
        get_size_and_store.add_to_role_policy(
            lambda_supplemental_policy_statement)

        ###########################################################################
        # AWS SNS TOPICS
        ###########################################################################
        # notification_topic = aws_sns.Topic(self, "notification_topic")

        ###########################################################################
        # ADD AMAZON S3 BUCKET NOTIFICATIONS
        ###########################################################################
        images_bucket.add_event_notification(
            aws_s3.EventType.OBJECT_CREATED,
            aws_s3_notifications.LambdaDestination(parse_image_list_file))

        ###########################################################################
        # AWS SQS QUEUES
        ###########################################################################
        comprehend_queue_iqueue = aws_sqs.Queue(self,
                                                "comprehend_queue_iqueue")
        comprehend_queue_iqueue_dlq = aws_sqs.DeadLetterQueue(
            max_receive_count=10, queue=comprehend_queue_iqueue)
        comprehend_queue = aws_sqs.Queue(
            self,
            "comprehend_queue",
            visibility_timeout=core.Duration.seconds(301),
            dead_letter_queue=comprehend_queue_iqueue_dlq)

        rekognition_queue_iqueue = aws_sqs.Queue(self,
                                                 "rekognition_queue_iqueue")
        rekognition_queue_dlq = aws_sqs.DeadLetterQueue(
            max_receive_count=10, queue=rekognition_queue_iqueue)
        rekognition_queue = aws_sqs.Queue(
            self,
            "rekognition_queue",
            visibility_timeout=core.Duration.seconds(301),
            dead_letter_queue=rekognition_queue_dlq)

        object_queue_iqueue = aws_sqs.Queue(self, "object_queue_iqueue")
        object_queue_dlq = aws_sqs.DeadLetterQueue(max_receive_count=10,
                                                   queue=object_queue_iqueue)
        object_queue = aws_sqs.Queue(
            self,
            "object_queue",
            visibility_timeout=core.Duration.seconds(301),
            dead_letter_queue=object_queue_dlq)

        ###########################################################################
        # AWS LAMBDA SQS EVENT SOURCE
        ###########################################################################
        get_size_and_store.add_event_source(
            SqsEventSource(object_queue, batch_size=10))

        ###########################################################################
        # AWS ELASTICSEARCH DOMAIN
        ###########################################################################
        s3workflow_domain = aws_elasticsearch.Domain(
            self,
            "s3workflow_domain",
            version=aws_elasticsearch.ElasticsearchVersion.V7_1,
            capacity={
                "master_nodes": 3,
                "data_nodes": 4
            },
            ebs={"volume_size": 100},
            zone_awareness={"availability_zone_count": 2},
            logging={
                "slow_search_log_enabled": True,
                "app_log_enabled": True,
                "slow_index_log_enabled": True
            })

        ###########################################################################
        # AMAZON COGNITO USER POOL
        ###########################################################################
        s3workflow_pool = aws_cognito.UserPool(
            self,
            "s3workflow-pool",
            account_recovery=None,
            auto_verify=None,
            custom_attributes=None,
            email_settings=None,
            enable_sms_role=None,
            lambda_triggers=None,
            mfa=None,
            mfa_second_factor=None,
            password_policy=None,
            self_sign_up_enabled=None,
            sign_in_aliases=aws_cognito.SignInAliases(email=True,
                                                      phone=None,
                                                      preferred_username=None,
                                                      username=True),
            sign_in_case_sensitive=None,
            sms_role=None,
            sms_role_external_id=None,
            standard_attributes=None,
            user_invitation=None,
            user_pool_name=None,
            user_verification=None)

        ###########################################################################
        # AMAZON VPC
        ###########################################################################
        vpc = aws_ec2.Vpc(self, "s3workflowVPC",
                          max_azs=3)  # default is all AZs in region

        ###########################################################################
        # AMAZON ECS CLUSTER
        ###########################################################################
        cluster = aws_ecs.Cluster(self, "s3", vpc=vpc)

        ###########################################################################
        # AMAZON ECS Repositories
        ###########################################################################
        rekognition_repository = aws_ecr.Repository(
            self,
            "rekognition_repository",
            image_scan_on_push=True,
            removal_policy=core.RemovalPolicy("DESTROY"))
        comprehend_repository = aws_ecr.Repository(
            self,
            "comprehend_repository",
            image_scan_on_push=True,
            removal_policy=core.RemovalPolicy("DESTROY"))

        ###########################################################################
        # AMAZON ECS Roles and Policies
        ###########################################################################
        task_execution_policy_statement = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "logs:*", "ecs:*", "ec2:*", "elasticloadbalancing:*", "ecr:*"
            ],
            resources=["*"])
        task_execution_policy_document = aws_iam.PolicyDocument()
        task_execution_policy_document.add_statements(
            task_execution_policy_statement)
        task_execution_policy = aws_iam.Policy(
            self,
            "task_execution_policy",
            document=task_execution_policy_document)
        task_execution_role = aws_iam.Role(
            self,
            "task_execution_role",
            assumed_by=aws_iam.ServicePrincipal('ecs-tasks.amazonaws.com'))
        task_execution_role.attach_inline_policy(task_execution_policy)

        task_policy_statement = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "logs:*", "xray:*", "sqs:*", "s3:*", "rekognition:*",
                "comprehend:*", "es:*"
            ],
            resources=["*"])
        task_policy_document = aws_iam.PolicyDocument()
        task_policy_document.add_statements(task_policy_statement)
        task_policy = aws_iam.Policy(self,
                                     "task_policy",
                                     document=task_policy_document)
        task_role = aws_iam.Role(
            self,
            "task_role",
            assumed_by=aws_iam.ServicePrincipal('ecs-tasks.amazonaws.com'))
        task_role.attach_inline_policy(task_policy)

        ###########################################################################
        # AMAZON ECS Task definitions
        ###########################################################################
        rekognition_task_definition = aws_ecs.TaskDefinition(
            self,
            "rekognition_task_definition",
            compatibility=aws_ecs.Compatibility("FARGATE"),
            cpu="1024",
            # ipc_mode=None,
            memory_mib="2048",
            network_mode=aws_ecs.NetworkMode("AWS_VPC"),
            # pid_mode=None,                                      #Not supported in Fargate and Windows containers
            # placement_constraints=None,
            execution_role=task_execution_role,
            # family=None,
            # proxy_configuration=None,
            task_role=task_role
            # volumes=None
        )

        comprehend_task_definition = aws_ecs.TaskDefinition(
            self,
            "comprehend_task_definition",
            compatibility=aws_ecs.Compatibility("FARGATE"),
            cpu="1024",
            # ipc_mode=None,
            memory_mib="2048",
            network_mode=aws_ecs.NetworkMode("AWS_VPC"),
            # pid_mode=None,                                      #Not supported in Fargate and Windows containers
            # placement_constraints=None,
            execution_role=task_execution_role,
            # family=None,
            # proxy_configuration=None,
            task_role=task_role
            # volumes=None
        )

        ###########################################################################
        # AMAZON ECS Images
        ###########################################################################
        rekognition_ecr_image = aws_ecs.EcrImage(
            repository=rekognition_repository, tag="latest")
        comprehend_ecr_image = aws_ecs.EcrImage(
            repository=comprehend_repository, tag="latest")

        ###########################################################################
        # ENVIRONMENT VARIABLES
        ###########################################################################
        environment_variables = {}
        environment_variables["COMPREHEND_QUEUE"] = comprehend_queue.queue_url
        environment_variables[
            "REKOGNITION_QUEUE"] = rekognition_queue.queue_url
        environment_variables["IMAGES_BUCKET"] = images_bucket.bucket_name
        environment_variables[
            "ELASTICSEARCH_HOST"] = s3workflow_domain.domain_endpoint

        parse_image_list_file.add_environment(
            "ELASTICSEARCH_HOST", s3workflow_domain.domain_endpoint)
        parse_image_list_file.add_environment("QUEUEURL",
                                              rekognition_queue.queue_url)
        parse_image_list_file.add_environment("DEBUG", "False")
        parse_image_list_file.add_environment("BUCKET", "-")
        parse_image_list_file.add_environment("KEY", "-")

        list_objects.add_environment("QUEUEURL", object_queue.queue_url)
        list_objects.add_environment("ELASTICSEARCH_HOST",
                                     s3workflow_domain.domain_endpoint)
        list_objects.add_environment("S3_BUCKET_NAME",
                                     images_bucket.bucket_name)
        list_objects.add_environment("S3_BUCKET_PREFIX", "images/")
        list_objects.add_environment("S3_BUCKET_SUFFIX", "")
        list_objects.add_environment("LOGGING_LEVEL", "INFO")

        get_size_and_store.add_environment("QUEUEURL", object_queue.queue_url)
        get_size_and_store.add_environment("ELASTICSEARCH_HOST",
                                           s3workflow_domain.domain_endpoint)
        get_size_and_store.add_environment("S3_BUCKET_NAME",
                                           images_bucket.bucket_name)
        get_size_and_store.add_environment("S3_BUCKET_PREFIX", "images/")
        get_size_and_store.add_environment("S3_BUCKET_SUFFIX", "")
        get_size_and_store.add_environment("LOGGING_LEVEL", "INFO")

        ###########################################################################
        # ECS Log Drivers
        ###########################################################################
        rekognition_task_log_driver = aws_ecs.LogDriver.aws_logs(
            stream_prefix="s3workflow",
            log_retention=aws_logs.RetentionDays("ONE_DAY"))
        comprehend_task_log_driver = aws_ecs.LogDriver.aws_logs(
            stream_prefix="s3workflow",
            log_retention=aws_logs.RetentionDays("ONE_DAY"))

        ###########################################################################
        # ECS Task Definitions
        ###########################################################################
        rekognition_task_definition.add_container(
            "rekognition_task_definition",
            image=rekognition_ecr_image,
            memory_reservation_mib=1024,
            environment=environment_variables,
            logging=rekognition_task_log_driver)

        comprehend_task_definition.add_container(
            "comprehend_task_definition",
            image=comprehend_ecr_image,
            memory_reservation_mib=1024,
            environment=environment_variables,
            logging=comprehend_task_log_driver)

        ###########################################################################
        # AWS ROUTE53 HOSTED ZONE
        ###########################################################################
        hosted_zone = aws_route53.HostedZone(
            self,
            "hosted_zone",
            zone_name="s3workflow.com",
            comment="private hosted zone for s3workflow system")
        hosted_zone.add_vpc(vpc)
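        ###########################################################################
        # OPTIONAL FRIENDLY DNS NAME (not part of the original example)
        ###########################################################################
        # Hedged follow-on: a CNAME in the private zone pointing at the
        # Elasticsearch endpoint gives containers a stable hostname; the
        # record name is an illustrative choice.
        aws_route53.CnameRecord(
            self,
            "elasticsearch_cname",
            zone=hosted_zone,
            record_name="search.s3workflow.com",
            domain_name=s3workflow_domain.domain_endpoint)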
Example #9
    def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Validate required props.
        required_props_keys = ['CfOriginDomainName', 'Asg', 'HostedZoneName', 'WebsiteDns']
        for k in required_props_keys:
            if k not in props or not props[k]:
                raise ValueError("Required prop %s is not present" % k)

        # Create a custom resource that returns the IP of the host behind the autoscaling group
        asg = props['Asg']
        asg_ip_handler = lambda_.Function(
            self, 'GhostIpHandler',
            runtime=lambda_.Runtime.PYTHON_3_6,
            code=lambda_.Code.asset('lambda'),
            handler='ghost_ip.handler',
        )

        asg_ip_handler.add_to_role_policy(
            statement=iam.PolicyStatement(
                actions=['autoscaling:DescribeAutoScalingGroups', 'ec2:DescribeInstances'],
                resources=['*'],
            )
        )

        asg_ip_provider = cr.Provider(
            self, 'GhostIpProvider',
            on_event_handler=asg_ip_handler,
        )

        asg_ip_resource = cfn.CustomResource(
            self, 'GhostIpResource',
            provider=asg_ip_provider,
            properties={
                'AsgName': asg.auto_scaling_group_name,
                'ts': time.time(), # this makes sure the function is invoked for every CFN update
            }
        )

        # Create the Route 53 hosted zone and the CloudFront origin DNS record
        if 'ExistingHostedZoneId' in props and props['ExistingHostedZoneId']:
            hz = route53.HostedZone.from_hosted_zone_attributes(
                self, 'HostedZone', 
                zone_name=props['HostedZoneName'],
                hosted_zone_id=props['ExistingHostedZoneId'],
            )
        else:
            hz = route53.HostedZone(
                self, 'HostedZone',
                zone_name=props['HostedZoneName']
            )

        origin_rrset = route53.ARecord(
            self, 'OriginRecord',
            target=route53.RecordTarget.from_ip_addresses(asg_ip_resource.get_att_string('GhostIp')),
            record_name=props['CfOriginDomainName'],
            zone=hz,
        )

        # Create a CF distro
        acm_cert = acm.DnsValidatedCertificate(
            self, 'GhostAcmCert',
            hosted_zone=hz,
            domain_name=props['WebsiteDns'],
            region='us-east-1',
        )

        cf_distro = cf.CloudFrontWebDistribution(
            self, 'CfDistro',
            origin_configs=[cf.SourceConfiguration(
                custom_origin_source=cf.CustomOriginConfig(
                    domain_name=props['CfOriginDomainName'],
                    origin_protocol_policy=cf.OriginProtocolPolicy.HTTP_ONLY,
                ),
                behaviors=[cf.Behavior(is_default_behavior=True)],
            )],
            alias_configuration=cf.AliasConfiguration(
                names=[props['WebsiteDns']],
                acm_cert_ref=acm_cert.certificate_arn,
            ),
            default_root_object='',
        )

        # Create the top level website DNS pointing to the CF distro
        ghost_rrset = route53.CnameRecord(
            self, 'GhostDns',
            domain_name=cf_distro.domain_name,
            zone=hz,
            record_name=props['WebsiteDns'],
        )
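lambda/ghost_ip.py is not included in the excerpt. A plausible sketch: with cr.Provider, the on-event handler only returns a dict and the framework answers CloudFormation, so get_att_string('GhostIp') above reads whatever comes back under 'Data'. Using the public IP is an assumption, since CloudFront needs a reachable origin.

import boto3


def handler(event, context):
    # Hypothetical reconstruction of ghost_ip.handler.
    if event['RequestType'] == 'Delete':
        return {}
    asg_name = event['ResourceProperties']['AsgName']
    group = boto3.client('autoscaling').describe_auto_scaling_groups(
        AutoScalingGroupNames=[asg_name])['AutoScalingGroups'][0]
    instance_id = group['Instances'][0]['InstanceId']
    reservation = boto3.client('ec2').describe_instances(
        InstanceIds=[instance_id])['Reservations'][0]
    ip = reservation['Instances'][0].get('PublicIpAddress')
    return {'Data': {'GhostIp': ip}}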
Example #10
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 *,
                 stage: str = '',
                 bucket_name: str = None,
                 website_params: dict = None,
                 hosted_params: dict = None,
                 **kwargs) -> None:
        """
        deploys all AWS resources for your static website
            Resources:
                AWS::S3::Bucket for your website
                AWS::S3::BucketPolicy with read-only policy
                AWS::CloudFront::Distribution with bucket like origin
                AWS::Route53::HostedZone if you pass only zone_name and not zone_id
                AWS::Route53::RecordSetGroup with name the bucket and target the distribution
        """
        super().__init__(scope, id, **kwargs)

        if scope.node.try_get_context("stage"):
            stage = scope.node.try_get_context("stage")
        if stage:
            stage = stage + '-'

        website_bucket = s3.Bucket(
            self,
            id + "Bucket",
            bucket_name=stage + bucket_name,
            access_control=s3.BucketAccessControl('PUBLIC_READ'),
            website_index_document=website_params['index_document'],
            website_error_document=website_params['error_document'])

        policy_document = iam.PolicyDocument()
        policy_statement = iam.PolicyStatement(
            actions=["s3:GetObject"],
            effect=iam.Effect("ALLOW"),
            resources=[website_bucket.bucket_arn + "/*"])
        policy_statement.add_any_principal()
        policy_document.add_statements(policy_statement)

        bucket_policy = s3.CfnBucketPolicy(self,
                                           id + "Policy",
                                           bucket=website_bucket.bucket_name,
                                           policy_document=policy_document)

        distribution = cloudfront.CloudFrontWebDistribution(
            self,
            id + "Distribution",
            origin_configs=[
                cloudfront.SourceConfiguration(
                    s3_origin_source=cloudfront.S3OriginConfig(
                        s3_bucket_source=website_bucket),
                    behaviors=[cloudfront.Behavior(is_default_behavior=True)])
            ])

        hosted_zone = None
        if hosted_params and "zone_id" not in hosted_params:
            hosted_zone = route53.HostedZone(
                self, id + "Hosted", zone_name=hosted_params['zone_name'])
            hosted_params['zone_id'] = hosted_zone.hosted_zone_id

        dns_record = None
        if hosted_params and "zone_name" in hosted_params:
            dns_record = route53.CfnRecordSetGroup(
                self,
                id + "Record",
                hosted_zone_name=hosted_params['zone_name'] + ".",
                record_sets=[
                    route53.CfnRecordSetGroup.RecordSetProperty(
                        name=website_bucket.bucket_name + ".",
                        type="A",
                        alias_target=route53.CfnRecordSetGroup.
                        AliasTargetProperty(
                            dns_name=distribution.distribution_domain_name,
                            hosted_zone_id=hosted_params['zone_id']))
                ])
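A hedged usage sketch for the construct above; StaticWebsite is a placeholder for its class name and all parameter values are illustrative.

# Inside some stack's __init__:
StaticWebsite(
    self,
    "Website",
    stage="dev",
    bucket_name="example.com",
    website_params={"index_document": "index.html",
                    "error_document": "error.html"},
    hosted_params={"zone_name": "example.com"})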
Example #11
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # super().__init__(scope, id, context, outdir)

        # Common Stack Tags
        for k, v in constants.COMMON_TAGS.items():
            core.Tag.add(self, key=k, value=v)

        # Hosted Zone
        if constants.HOSTED_ZONE["id"]:
            hosted_zone = aws_route53.HostedZone.from_hosted_zone_attributes(
                self,
                "ImportedHostedZone",
                hosted_zone_id=constants.HOSTED_ZONE["id"],
                zone_name=constants.HOSTED_ZONE["name"])
        else:
            hosted_zone = aws_route53.HostedZone(
                self,
                "MainHostedZone",
                zone_name=constants.HOSTED_ZONE["name"],
                comment="Hosted Zone for {}".format(
                    constants.HOSTED_ZONE["name"]),
            )

        # ACM Certificate
        if constants.CERTIFICATE["arn"]:
            acm_certificate = aws_certificatemanager.Certificate.from_certificate_arn(
                self,
                "ImportedCertificate",
                certificate_arn=constants.CERTIFICATE["arn"])
        else:
            acm_certificate = aws_certificatemanager.DnsValidatedCertificate(
                self,
                "CloudFrontCertificate",
                hosted_zone=hosted_zone,
                region=constants.CERTIFICATE["region"],
                domain_name=constants.HOSTED_ZONE["domain"],
                subject_alternative_names=constants.CERTIFICATE["alt_domains"],
                validation_method=aws_certificatemanager.ValidationMethod.DNS)
            acm_certificate.node.add_dependency(hosted_zone)

        # Website Bucket
        website_bucket = aws_s3.Bucket(
            self,
            "WebsiteBucket",
            encryption=aws_s3.BucketEncryption.S3_MANAGED,
            removal_policy=core.RemovalPolicy.DESTROY)

        # Cloudfront Origin Access Identity (OAI)
        website_bucket_oai = aws_cloudfront.CfnCloudFrontOriginAccessIdentity(
            self,
            "CloudfrontOAI",
            cloud_front_origin_access_identity_config=aws_cloudfront.
            CfnCloudFrontOriginAccessIdentity.
            CloudFrontOriginAccessIdentityConfigProperty(
                comment="CloudFrontOAIFor{}".format(
                    constants.PROJECT_CODE.capitalize())))

        # Canonical User Principal of OAI
        oai_canonical_user_principal = aws_iam.CanonicalUserPrincipal(
            website_bucket_oai.attr_s3_canonical_user_id)

        # Website Bucket Policy
        website_bucket.add_to_resource_policy(
            aws_iam.PolicyStatement(
                actions=["s3:GetObject"],
                resources=[website_bucket.arn_for_objects("*")],
                principals=[oai_canonical_user_principal],
                effect=aws_iam.Effect.ALLOW))

        # Import the existing URL-rewrite Lambda function
        lambda_function = aws_lambda.Function.from_function_arn(
            self,
            "UrlRewriteFunction",
            function_arn=constants.URL_REWRITE_FUNCTION_ARN)

        lambda_function_version_arn = aws_lambda.Version.from_version_arn(
            self,
            "LambdaFunctionArn",
            version_arn=constants.URL_REWRITE_FUNCTION_VERSION_ARN)

        # CloudFront Web Distribution
        cloudfront_distribution = aws_cloudfront.CloudFrontWebDistribution(
            self,
            "CloudFrontDistribution",
            comment="waqqas.tech",
            default_root_object="index.html",
            viewer_protocol_policy=aws_cloudfront.ViewerProtocolPolicy.
            REDIRECT_TO_HTTPS,
            alias_configuration=aws_cloudfront.AliasConfiguration(
                acm_cert_ref=acm_certificate.certificate_arn,
                security_policy=aws_cloudfront.SecurityPolicyProtocol.
                TLS_V1_2_2018,
                names=constants.CLOUDFRONT["alt_domains"]),
            origin_configs=[
                aws_cloudfront.SourceConfiguration(
                    s3_origin_source=aws_cloudfront.S3OriginConfig(
                        s3_bucket_source=website_bucket,
                        origin_access_identity_id=website_bucket_oai.ref),
                    behaviors=[
                        aws_cloudfront.Behavior(
                            allowed_methods=aws_cloudfront.
                            CloudFrontAllowedMethods.GET_HEAD_OPTIONS,
                            cached_methods=aws_cloudfront.
                            CloudFrontAllowedCachedMethods.GET_HEAD,
                            compress=True,
                            is_default_behavior=True,
                            path_pattern="*",
                            default_ttl=core.Duration.seconds(
                                amount=constants.CLOUDFRONT['default_ttl']),
                            lambda_function_associations=[
                                aws_cloudfront.LambdaFunctionAssociation(
                                    event_type=aws_cloudfront.
                                    LambdaEdgeEventType.ORIGIN_REQUEST,
                                    lambda_function=lambda_function_version_arn
                                )
                            ])
                    ])
            ])

        # CloudFront Route53 Record (alias records carry no TTL of their own,
        # so none is set here)
        primary_dns_record = aws_route53.ARecord(
            self,
            "PrimaryDNSRecord",
            zone=hosted_zone,
            comment="{} CloudFront Dist Alias Record".format(
                constants.PROJECT_CODE),
            record_name="{}.".format(constants.HOSTED_ZONE["domain"]),
            target=aws_route53.RecordTarget.from_alias(
                aws_route53_targets.CloudFrontTarget(cloudfront_distribution)),
        )

        # Artifact Bucket
        artifact_bucket = aws_s3.Bucket(
            self,
            "ArtifactBucket",
            encryption=aws_s3.BucketEncryption.S3_MANAGED,
            removal_policy=core.RemovalPolicy.DESTROY)

        # CodeBuild
        bucket_name_env_variable = aws_codebuild.BuildEnvironmentVariable(
            value=website_bucket.bucket_name)
        codebuild_environment = aws_codebuild.BuildEnvironment(
            build_image=aws_codebuild.LinuxBuildImage.
            UBUNTU_14_04_PYTHON_3_7_1,
            compute_type=aws_codebuild.ComputeType.SMALL)
        codebuild_buildspec = aws_codebuild.BuildSpec.from_object(
            value=buildspec.BUILDSPEC)
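
        # buildspec.BUILDSPEC is defined elsewhere in this project; a minimal
        # sketch of the shape such an object might take for a Hugo site (the
        # phase commands are assumptions, not the project's actual spec):
        #
        #     BUILDSPEC = {
        #         "version": "0.2",
        #         "phases": {
        #             "build": {"commands": [
        #                 "hugo",
        #                 "aws s3 sync public/ s3://${BUCKET_NAME} --delete",
        #             ]},
        #         },
        #     }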
        codebuild_project = aws_codebuild.PipelineProject(
            self,
            "CodeBuildProject",
            environment_variables={
                "BUCKET_NAME": bucket_name_env_variable
            },
            environment=codebuild_environment,
            build_spec=codebuild_buildspec,
            description="CodeBuild Project for {} Content".format(
                constants.PROJECT_CODE),
            timeout=core.Duration.seconds(amount=300))
        # TODO: Lock down permissions for buckets (see the sketch below)
        codebuild_project.add_to_role_policy(
            aws_iam.PolicyStatement(actions=["s3:*"],
                                    effect=aws_iam.Effect.ALLOW,
                                    resources=[
                                        website_bucket.arn_for_objects("*"),
                                        artifact_bucket.arn_for_objects("*"),
                                        website_bucket.bucket_arn,
                                        artifact_bucket.bucket_arn,
                                    ]))
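
        # A least-privilege sketch for the TODO above, assuming the build only
        # needs to read and write objects in these two buckets; kept commented
        # out so the blanket grant above remains the effective policy:
        #
        #     codebuild_project.add_to_role_policy(
        #         aws_iam.PolicyStatement(
        #             actions=["s3:GetObject", "s3:PutObject", "s3:ListBucket"],
        #             effect=aws_iam.Effect.ALLOW,
        #             resources=[
        #                 website_bucket.arn_for_objects("*"),
        #                 artifact_bucket.arn_for_objects("*"),
        #                 website_bucket.bucket_arn,
        #                 artifact_bucket.bucket_arn,
        #             ]))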
        # Codepipeline
        codepipeline = aws_codepipeline.Pipeline(
            self,
            "CodePipelineWebsiteContent",
            artifact_bucket=artifact_bucket,
            stages=[
                aws_codepipeline.StageProps(
                    stage_name="Source",
                    actions=[
                        aws_codepipeline_actions.GitHubSourceAction(
                            oauth_token=core.SecretValue.plain_text(
                                constants.GITHUB_OAUTH_TOKEN),
                            output=aws_codepipeline.Artifact(
                                artifact_name="source"),
                            owner=constants.GITHUB_USER_NAME,
                            repo=constants.GITHUB_REPO_NAME,
                            branch=constants.BRANCH_NAME,
                            action_name="GithubSource",
                            trigger=aws_codepipeline_actions.GitHubTrigger.
                            WEBHOOK)
                    ]),
                aws_codepipeline.StageProps(
                    stage_name="Build",
                    actions=[
                        aws_codepipeline_actions.CodeBuildAction(
                            input=aws_codepipeline.Artifact(
                                artifact_name="source"),
                            project=codebuild_project,
                            type=aws_codepipeline_actions.CodeBuildActionType.
                            BUILD,
                            action_name="HugoBuild")
                    ])
            ])
        # TODO: Lock down permissions for buckets (same approach as the sketch above)
        codepipeline.add_to_role_policy(
            aws_iam.PolicyStatement(actions=["s3:*"],
                                    effect=aws_iam.Effect.ALLOW,
                                    resources=[
                                        website_bucket.arn_for_objects("*"),
                                        artifact_bucket.arn_for_objects("*"),
                                        website_bucket.bucket_arn,
                                        artifact_bucket.bucket_arn,
                                    ]))
Example #12
    def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)


        ###########################################################################
        # AMAZON VPC  
        ###########################################################################
        vpc = ec2.Vpc(self, "LoadTestVPC", max_azs=3)     # cap the VPC at 3 AZs


        ###########################################################################
        # AMAZON ECR Repositories
        ###########################################################################
        get_repository = aws_ecr.Repository(self, "get_repository", image_scan_on_push=True, removal_policy=core.RemovalPolicy.DESTROY)
        put_repository = aws_ecr.Repository(self, "put_repository", image_scan_on_push=True, removal_policy=core.RemovalPolicy.DESTROY)
        xray_repository = aws_ecr.Repository(self, "xray_repository", image_scan_on_push=True, removal_policy=core.RemovalPolicy.DESTROY)


        ###########################################################################
        # AMAZON ECS Roles and Policies
        ###########################################################################        
        task_execution_policy_statement = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=["logs:*", "ecs:*", "ec2:*", "elasticloadbalancing:*","ecr:*"],
            resources=["*"]
            )
        task_execution_policy_document = aws_iam.PolicyDocument()
        task_execution_policy_document.add_statements(task_execution_policy_statement)
        task_execution_policy = aws_iam.Policy(self, "task_execution_policy", document=task_execution_policy_document)
        task_execution_role = aws_iam.Role(self, "task_execution_role", assumed_by=aws_iam.ServicePrincipal('ecs-tasks.amazonaws.com') )
        task_execution_role.attach_inline_policy(task_execution_policy)

        task_policy_statement = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=["logs:*", "xray:*", "sqs:*", "s3:*"],
            resources=["*"]
            )
        task_policy_document = aws_iam.PolicyDocument()
        task_policy_document.add_statements(task_policy_statement)
        task_policy = aws_iam.Policy(self, "task_policy", document=task_policy_document)
        task_role = aws_iam.Role(self, "task_role", assumed_by=aws_iam.ServicePrincipal('ecs-tasks.amazonaws.com') )
        task_role.attach_inline_policy(task_policy)


        ###########################################################################
        # AMAZON ECS Task definitions
        ###########################################################################
        # Fargate tasks must use the awsvpc network mode
        get_task_definition = aws_ecs.TaskDefinition(
            self,
            "gettaskdefinition",
            compatibility=aws_ecs.Compatibility.FARGATE,
            cpu="1024",
            memory_mib="2048",
            network_mode=aws_ecs.NetworkMode.AWS_VPC,
            execution_role=task_execution_role,
            task_role=task_role)

        put_task_definition = aws_ecs.TaskDefinition(
            self,
            "puttaskdefinition",
            compatibility=aws_ecs.Compatibility.FARGATE,
            cpu="1024",
            memory_mib="2048",
            network_mode=aws_ecs.NetworkMode.AWS_VPC,
            execution_role=task_execution_role,
            task_role=task_role)


        ###########################################################################
        # AMAZON S3 BUCKETS 
        ###########################################################################
        storage_bucket = aws_s3.Bucket(self, "storage_bucket")


        ###########################################################################
        # AWS SQS QUEUES
        ###########################################################################
        # Dead-letter queue plus the main work queue that the ECS tasks consume
        ecs_task_dlq_queue = aws_sqs.Queue(self, "ecs_task_queue_dlq")
        ecs_task_queue_dlq = aws_sqs.DeadLetterQueue(max_receive_count=10, queue=ecs_task_dlq_queue)
        ecs_task_queue_queue = aws_sqs.Queue(self, "ecs_task_queue_queue", visibility_timeout=core.Duration.seconds(300), dead_letter_queue=ecs_task_queue_dlq)


        ###########################################################################
        # AMAZON ECS Images 
        ###########################################################################
        get_repository_ecr_image = aws_ecs.EcrImage(repository=get_repository, tag="latest")
        put_repository_ecr_image = aws_ecs.EcrImage(repository=put_repository, tag="latest")
        xray_repository_ecr_image = aws_ecs.EcrImage(repository=xray_repository, tag="latest")
        environment_variables = {}
        environment_variables["SQS_QUEUE"] = ecs_task_queue_queue.queue_url
        environment_variables["S3_BUCKET"] = storage_bucket.bucket_name
        
        get_task_log_driver = aws_ecs.LogDriver.aws_logs(stream_prefix="S3LoadTest", log_retention=aws_logs.RetentionDays.ONE_WEEK)
        put_task_log_driver = aws_ecs.LogDriver.aws_logs(stream_prefix="S3LoadTest", log_retention=aws_logs.RetentionDays.ONE_WEEK)
        xray_task_log_driver = aws_ecs.LogDriver.aws_logs(stream_prefix="S3LoadTest", log_retention=aws_logs.RetentionDays.ONE_WEEK)


        get_task_definition.add_container("get_task_definition_get", 
                                                    image=get_repository_ecr_image, 
                                                    memory_reservation_mib=1024,
                                                    environment=environment_variables,
                                                    logging=get_task_log_driver
                                                    )
        get_task_definition.add_container("get_task_definition_xray", 
                                                    image=xray_repository_ecr_image, 
                                                    memory_reservation_mib=1024,
                                                    environment=environment_variables,
                                                    logging=xray_task_log_driver
                                                    )

        put_task_definition.add_container("put_task_definition_put", 
                                                    image=put_repository_ecr_image, 
                                                    memory_reservation_mib=1024,
                                                    environment=environment_variables,
                                                    logging=put_task_log_driver
                                                    )
        put_task_definition.add_container("put_task_definition_xray", 
                                                    image=xray_repository_ecr_image, 
                                                    memory_reservation_mib=1024,
                                                    environment=environment_variables,
                                                    logging=xray_task_log_driver
                                                    )


        ###########################################################################
        # AMAZON ECS CLUSTER 
        ###########################################################################
        cluster = aws_ecs.Cluster(self, "LoadTestCluster", vpc=vpc)
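
        # The example defines task definitions and a cluster but never runs
        # anything; wiring a task into the cluster as a long-running service
        # might look like this sketch (construct id and desired_count are
        # assumptions):
        #
        #     aws_ecs.FargateService(
        #         self,
        #         "get_service",
        #         cluster=cluster,
        #         task_definition=get_task_definition,
        #         desired_count=1)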


        ###########################################################################
        # AWS ROUTE53 HOSTED ZONE 
        ###########################################################################
        hosted_zone = aws_route53.HostedZone(self, "hosted_zone", zone_name="loadtest.com", comment="private hosted zone for loadtest system")
        hosted_zone.add_vpc(vpc)
        bucket_record_values = [storage_bucket.bucket_name]
        queue_record_values = [ecs_task_queue_queue.queue_url]
        bucket_record_name = "bucket." + hosted_zone.zone_name
        queue_record_name = "filesqueue." + hosted_zone.zone_name
        hosted_zone_record_bucket = aws_route53.TxtRecord(self, "hosted_zone_record_bucket", record_name=bucket_record_name, values=bucket_record_values, zone=hosted_zone, comment="dns record for bucket name")
        hosted_zone_record_queue = aws_route53.TxtRecord(self, "hosted_zone_record_queue", record_name=queue_record_name, values=queue_record_values, zone=hosted_zone, comment="dns record for queue name")
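
        # The TXT records let tasks discover the bucket and queue by a stable
        # DNS name instead of baked-in values; resolving them from inside the
        # VPC could look like this sketch (assumes dnspython is available in
        # the task image):
        #
        #     import dns.resolver
        #
        #     answers = dns.resolver.resolve("bucket.loadtest.com", "TXT")
        #     bucket_name = answers[0].strings[0].decode()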