def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here

        vpc = Vpc(self, "MyVpc", max_azs=2)

        ecs_cluster = Cluster(self, 'FargateCluster', vpc=vpc)

        alb = ApplicationLoadBalancer(self,
                                      'EcsLb',
                                      vpc=vpc,
                                      internet_facing=True)

        listener = alb.add_listener('EcsListener', port=80)

        # Return a fixed 404 response for any request that no routing rule matches.
        listener.add_fixed_response('Default-Fix', status_code='404')
        # Escape hatch: set the default action directly on the underlying
        # CfnListener (the L1 property is default_actions, plural).
        listener.node.default_child.default_actions = [{
            "type": "fixed-response",
            "fixedResponseConfig": {
                "statusCode": "404"
            }
        }]

        website_bucket = Bucket(self,
                                'PetclinicWebsite',
                                website_index_document='index.html',
                                public_read_access=True,
                                removal_policy=core.RemovalPolicy.DESTROY)

        deployment = BucketDeployment(
            self,
            'PetclinicDeployWebsite',
            sources=[Source.asset('./spring-petclinic-static')],
            destination_bucket=website_bucket,
            retain_on_delete=False
            #destination_key_prefix='web/static'
        )

        # Modify the config.js with a CloudFormation custom resource
        modify_policy = [
            PolicyStatement(actions=[
                "s3:PutObject", "s3:PutObjectAcl", "s3:PutObjectVersionAcl",
                "s3:GetObject"
            ],
                            effect=Effect.ALLOW,
                            resources=[website_bucket.bucket_arn + "/*"]),
            PolicyStatement(actions=["s3:ListBucket"],
                            effect=Effect.ALLOW,
                            resources=[website_bucket.bucket_arn]),
            PolicyStatement(actions=["dynamodb:*"],
                            effect=Effect.ALLOW,
                            resources=[
                                "arn:aws:dynamodb:" + self.region + ":" +
                                self.account + ":*"
                            ])
        ]

        with open("custom-resource-code/init.py", encoding="utf-8") as fp:
            code_body = fp.read()

        dynamodb_tables = []

        for i, s in enumerate(['customers', 'vets', 'visits'], start=1):
            table = Table(
                self,
                s.capitalize() + 'Table',
                partition_key={
                    'name': 'id',
                    'type': AttributeType.STRING
                },
                removal_policy=core.RemovalPolicy.DESTROY,
                read_capacity=5,
                write_capacity=5,
            )

            dynamodb_tables.append(table.table_name)

            asset = DockerImageAsset(
                self,
                'spring-petclinic-' + s,
                repository_name=self.stack_name + '-' + s,
                directory='./spring-petclinic-serverless/spring-petclinic-' +
                s + '-serverless',
                build_args={
                    'JAR_FILE':
                    'spring-petclinic-' + s + '-serverless-2.0.7.jar'
                })

            ecs_task = FargateTaskDefinition(self,
                                             'TaskDef-Fargate-' + s,
                                             memory_limit_mib=512,
                                             cpu=256)

            ecs_task.add_to_task_role_policy(
                PolicyStatement(actions=["dynamodb:*"],
                                effect=Effect.ALLOW,
                                resources=[table.table_arn]))

            ecs_task.add_to_task_role_policy(
                PolicyStatement(actions=['xray:*'],
                                effect=Effect.ALLOW,
                                resources=['*']))

            env = {
                'DYNAMODB_TABLE_NAME': table.table_name,
                'SERVER_SERVLET_CONTEXT_PATH': '/api/' + s.rstrip('s')
            }

            ecs_container = ecs_task.add_container(
                'Container-' + s,
                image=ContainerImage.from_docker_image_asset(asset),
                logging=LogDriver.aws_logs(stream_prefix=s),
                environment=env)

            ecs_container.add_port_mappings(PortMapping(container_port=8080))

            # Sidecar container for the X-Ray daemon
            ecs_sidecar_container = ecs_task.add_container(
                'Sidecar-Xray-' + s,
                image=ContainerImage.from_registry('amazon/aws-xray-daemon'))

            ecs_sidecar_container.add_port_mappings(
                PortMapping(container_port=2000, protocol=Protocol.UDP))

            ecs_service = FargateService(self,
                                         'FargateService-' + s,
                                         cluster=ecs_cluster,
                                         service_name='spring-petclinic-' + s,
                                         desired_count=2,
                                         task_definition=ecs_task)

            pattern = '/api/' + s.rstrip('s') + '/*'
            # Listener rule priorities must be unique; derive them from the
            # loop index instead of a random value that could collide.
            priority = i
            check = HealthCheck(
                path='/api/' + s.rstrip('s') + '/manage',
                healthy_threshold_count=2,
                unhealthy_threshold_count=3,
            )

            target = listener.add_targets('ECS-' + s,
                                          path_pattern=pattern,
                                          priority=priority,
                                          port=80,
                                          targets=[ecs_service],
                                          health_check=check)

        resource = CustomResource(
            self,
            "S3ModifyCustomResource",
            provider=CustomResourceProvider.lambda_(
                SingletonFunction(self,
                                  "CustomResourceSingleton",
                                  uuid="f7d4f730-4ee1-11e8-9c2d-fa7ae01bbebc",
                                  code=InlineCode(code_body),
                                  handler="index.handler",
                                  timeout=core.Duration.seconds(300),
                                  runtime=Runtime.PYTHON_3_7,
                                  initial_policy=modify_policy)),
            properties={
                "Bucket": website_bucket.bucket_name,
                "InvokeUrl": 'http://' + alb.load_balancer_dns_name + '/',
                "DynamoDBTables": dynamodb_tables
            })

        core.CfnOutput(self,
                       "FargateALBUrl",
                       export_name="FargateALBUrl",
                       value=alb.load_balancer_dns_name)
        core.CfnOutput(self,
                       "FargatePetclinicWebsiteUrl",
                       export_name="FargatePetclinicWebsiteUrl",
                       value=website_bucket.bucket_website_url)
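
The custom resource above loads its Lambda handler from custom-resource-code/init.py, which is not shown here. Below is a minimal sketch of what that handler might contain, assuming it rewrites config.js in the website bucket from the properties the stack passes in; the handler name, the config.js layout, and the availability of the cfnresponse helper (bundled for inline-code Python Lambdas) are assumptions, not code from the original project.

import json

import boto3
import cfnresponse  # bundled by CloudFormation for inline (ZipFile) Python Lambdas

s3 = boto3.client('s3')


def handler(event, context):
    try:
        props = event['ResourceProperties']
        if event['RequestType'] in ('Create', 'Update'):
            # Point the static site at the ALB and the per-service tables.
            config = 'window.config = %s;' % json.dumps({
                'apiUrl': props['InvokeUrl'],
                'tables': props['DynamoDBTables'],
            })
            s3.put_object(Bucket=props['Bucket'],
                          Key='config.js',
                          Body=config.encode('utf-8'),
                          ContentType='application/javascript')
        cfnresponse.send(event, context, cfnresponse.SUCCESS, {})
    except Exception as exc:
        cfnresponse.send(event, context, cfnresponse.FAILED, {'Error': str(exc)})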
Example #2
def __init__(self, scope: Construct, id: str, deploy_env: str, config: dict, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)
    self.config = config
    self.deploy_env = deploy_env
    self.vpc = Vpc(self, f"AirflowVPC-{deploy_env}", cidr="10.0.0.0/16", max_azs=config["max_vpc_azs"])
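
Example #2 shows only the stack's constructor. A hypothetical entry point that instantiates it might look like the following, assuming CDK v2 imports; the stack class name AirflowVpcStack and the config contents are assumptions, not names from the original source.

from aws_cdk import App

app = App()
AirflowVpcStack(app,
                "airflow-dev",
                deploy_env="dev",
                config={"max_vpc_azs": 2})
app.synth()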
Example #3
    def __init__(self, scope: Construct, stack_id: str, *, props: SEPStackProps, **kwargs):
        """
        Initialize a new instance of SEPStack
        :param scope: The scope of this construct.
        :param stack_id: The ID of this construct.
        :param props: The properties for this construct.
        :param kwargs: Any kwargs that need to be passed on to the parent class.
        """
        super().__init__(scope, stack_id, **kwargs)

        # The VPC that all components of the render farm will be created in.
        vpc = Vpc(
            self,
            'Vpc',
            max_azs=2,
        )

        recipes = ThinkboxDockerRecipes(
            self,
            'Image',
            stage=Stage.from_directory(props.docker_recipes_stage_path),
        )

        repository = Repository(
            self,
            'Repository',
            vpc=vpc,
            version=recipes.version,
            repository_installation_timeout=Duration.minutes(20),
            # TODO - Evaluate deletion protection for your own needs. These properties are set to RemovalPolicy.DESTROY
            # to cleanly remove everything when this stack is destroyed. If you would like to ensure
            # that these resources are not accidentally deleted, you should set these properties to RemovalPolicy.RETAIN
            # or just remove the removal_policy parameter.
            removal_policy=RepositoryRemovalPolicies(
                database=RemovalPolicy.DESTROY,
                filesystem=RemovalPolicy.DESTROY,
            ),
        )

        host = 'renderqueue'
        zone_name = 'deadline-test.internal'

        # Internal DNS zone for the VPC.
        dns_zone = PrivateHostedZone(
            self,
            'DnsZone',
            vpc=vpc,
            zone_name=zone_name,
        )

        ca_cert = X509CertificatePem(
            self,
            'RootCA',
            subject=DistinguishedName(
                cn='SampleRootCA',
            ),
        )

        server_cert = X509CertificatePem(
            self,
            'RQCert',
            subject=DistinguishedName(
                cn=f'{host}.{dns_zone.zone_name}',
                o='RFDK-Sample',
                ou='RenderQueueExternal',
            ),
            signing_certificate=ca_cert,
        )

        render_queue = RenderQueue(
            self,
            'RenderQueue',
            vpc=vpc,
            version=recipes.version,
            images=recipes.render_queue_images,
            repository=repository,
            # TODO - Evaluate deletion protection for your own needs. This is set to false to
            # cleanly remove everything when this stack is destroyed. If you would like to ensure
            # that this resource is not accidentally deleted, you should set this to true.
            deletion_protection=False,
            hostname=RenderQueueHostNameProps(
                hostname=host,
                zone=dns_zone,
            ),
            traffic_encryption=RenderQueueTrafficEncryptionProps(
                external_tls=RenderQueueExternalTLSProps(
                    rfdk_certificate=server_cert,
                ),
                internal_protocol=ApplicationProtocol.HTTPS,
            ),
        )

        if props.create_resource_tracker_role:
            # Create the Resource Tracker Access role. This role must exist in
            # your account for the Deadline Resource Tracker to work properly.
            Role(
                self,
                'ResourceTrackerRole',
                assumed_by=ServicePrincipal('lambda.amazonaws.com'),
                managed_policies= [ManagedPolicy.from_aws_managed_policy_name('AWSThinkboxDeadlineResourceTrackerAccessPolicy')],
                role_name= 'DeadlineResourceTrackerAccessRole',
            )

        fleet = SpotEventPluginFleet(
            self,
            'SpotEventPluginFleet',
            vpc=vpc,
            render_queue=render_queue,
            deadline_groups=['group_name'],
            instance_types=[InstanceType.of(InstanceClass.BURSTABLE3, InstanceSize.LARGE)],
            worker_machine_image=props.worker_machine_image,
            max_capacity=1,
        )

        # Optional: Add additional tags to both spot fleet request and spot instances.
        Tags.of(fleet).add('name', 'SEPtest')

        ConfigureSpotEventPlugin(
            self,
            'ConfigureSpotEventPlugin',
            vpc=vpc,
            render_queue=render_queue,
            spot_fleets=[fleet],
            configuration=SpotEventPluginSettings(
                enable_resource_tracker=True,
            ),
        )
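
Example #3 references a SEPStackProps object that is not defined in the snippet. Below is a minimal sketch of such a container, with fields inferred from how props is used above; the dataclass itself and its default values are assumptions.

from dataclasses import dataclass

from aws_cdk.aws_ec2 import IMachineImage


@dataclass
class SEPStackProps:
    # Path to the staged Deadline Docker recipes consumed by ThinkboxDockerRecipes.
    docker_recipes_stage_path: str
    # Machine image that the Spot Event Plugin worker fleet will run.
    worker_machine_image: IMachineImage
    # Whether to create the DeadlineResourceTrackerAccessRole in this account.
    create_resource_tracker_role: bool = False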
Example #4
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Create two VPCs - one to host our private website, the other to act as a client
        website_vpc = Vpc(
            self,
            "WEBSITEVPC",
            cidr="10.0.0.0/16",
        )
        client_vpc = Vpc(
            self,
            "ClientVPC",
            cidr="10.1.0.0/16",
        )

        # Create a bastion host in the client VPC which will act like our client workstation
        bastion = BastionHostLinux(
            self,
            "WEBClient",
            vpc=client_vpc,
            instance_name='my-bastion',
            instance_type=InstanceType('t3.micro'),
            machine_image=AmazonLinuxImage(),
            subnet_selection=SubnetSelection(subnet_type=SubnetType.PRIVATE),
            security_group=SecurityGroup(
                scope=self,
                id='bastion-sg',
                security_group_name='bastion-sg',
                description='Security group for the bastion; no inbound rules '
                'are open because the bastion is reached via AWS SSM',
                vpc=client_vpc,
                allow_all_outbound=True))

        # Set up a VPC peering connection between the client and website VPCs,
        # and adjust the routing tables to allow connections back and forth
        # (a sketch of this helper construct appears after this example)
        VpcPeeringHelper(self, 'Peering', website_vpc, client_vpc)

        # Create VPC endpoints for API gateway
        vpc_endpoint = InterfaceVpcEndpoint(
            self,
            'APIGWVpcEndpoint',
            vpc=website_vpc,
            service=InterfaceVpcEndpointAwsService.APIGATEWAY,
            private_dns_enabled=True,
        )
        vpc_endpoint.connections.allow_from(bastion, Port.tcp(443))
        endpoint_id = vpc_endpoint.vpc_endpoint_id

        api_policy = iam.PolicyDocument(statements=[
            iam.PolicyStatement(principals=[iam.AnyPrincipal()],
                                actions=['execute-api:Invoke'],
                                resources=['execute-api:/*'],
                                effect=iam.Effect.DENY,
                                conditions={
                                    "StringNotEquals": {
                                        "aws:SourceVpce": endpoint_id
                                    }
                                }),
            iam.PolicyStatement(principals=[iam.AnyPrincipal()],
                                actions=['execute-api:Invoke'],
                                resources=['execute-api:/*'],
                                effect=iam.Effect.ALLOW)
        ])

        # Create an s3 bucket to hold the content
        content_bucket = s3.Bucket(self,
                                   "ContentBucket",
                                   removal_policy=core.RemovalPolicy.DESTROY)

        # Upload our static content to the bucket
        s3dep.BucketDeployment(self,
                               "DeployWithInvalidation",
                               sources=[s3dep.Source.asset('website')],
                               destination_bucket=content_bucket)

        # Create a private API GW in the API VPC
        api = apigw.RestApi(self,
                            'PrivateS3Api',
                            endpoint_configuration=apigw.EndpointConfiguration(
                                types=[apigw.EndpointType.PRIVATE],
                                vpc_endpoints=[vpc_endpoint]),
                            policy=api_policy)

        # Create a role to allow API GW to access our S3 bucket contents
        role = iam.Role(
            self,
            "Role",
            assumed_by=iam.ServicePrincipal("apigateway.amazonaws.com"))
        role.add_to_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                resources=[
                                    content_bucket.bucket_arn,
                                    content_bucket.bucket_arn + '/*'
                                ],
                                actions=["s3:Get*"]))

        # Create a proxy resource that captures all non-root resource requests
        resource = api.root.add_resource("{proxy+}")
        # Create an integration with S3
        resource_integration = apigw.Integration(
            type=apigw.IntegrationType.AWS,
            integration_http_method='GET',
            options=apigw.IntegrationOptions(
                request_parameters=
                {  # map the proxy parameter so we can pass the request path
                    "integration.request.path.proxy":
                    "method.request.path.proxy"
                },
                integration_responses=[
                    apigw.IntegrationResponse(
                        status_code='200',
                        response_parameters=
                        {  # map the content type of the S3 object back to the HTTP response
                            "method.response.header.Content-Type":
                            "integration.response.header.Content-Type"
                        })
                ],
                credentials_role=role),
            # reference the bucket content we want to retrieve; use the stack's
            # region rather than hard-coding one
            uri='arn:aws:apigateway:%s:s3:path/%s/{proxy}' %
            (core.Aws.REGION, content_bucket.bucket_name))
        # handle the GET request and map it to our new integration
        resource.add_method(
            "GET",
            resource_integration,
            method_responses=[
                apigw.MethodResponse(status_code='200',
                                     response_parameters={
                                         "method.response.header.Content-Type":
                                         False
                                     })
            ],
            request_parameters={"method.request.path.proxy": True})
        # Handle requests to the root of our site
        # Create another integration with S3 - this time with no proxy parameter
        resource_integration = apigw.Integration(
            type=apigw.IntegrationType.AWS,
            integration_http_method='GET',
            options=apigw.IntegrationOptions(
                integration_responses=[
                    apigw.IntegrationResponse(
                        status_code='200',
                        response_parameters=
                        {  # map the content type of the S3 object back to the HTTP response
                            "method.response.header.Content-Type":
                            "integration.response.header.Content-Type"
                        })
                ],
                credentials_role=role),
            # reference the bucket content we want to retrieve; use the stack's
            # region rather than hard-coding one
            uri='arn:aws:apigateway:%s:s3:path/%s/index.html' %
            (core.Aws.REGION, content_bucket.bucket_name))
        # handle the GET request and map it to our new integration
        api.root.add_method("GET",
                            resource_integration,
                            method_responses=[
                                apigw.MethodResponse(
                                    status_code='200',
                                    response_parameters={
                                        "method.response.header.Content-Type":
                                        False
                                    })
                            ])
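
The VpcPeeringHelper used in Example #4 is a custom construct, not part of the CDK, and its definition is not shown above. Below is a minimal sketch of what it might do, assuming it creates the peering connection and routes each VPC's private subnets to the other; the class internals are assumptions.

from aws_cdk import core
from aws_cdk.aws_ec2 import CfnRoute, CfnVPCPeeringConnection, Vpc


class VpcPeeringHelper(core.Construct):
    def __init__(self, scope: core.Construct, id: str, vpc: Vpc, peer_vpc: Vpc) -> None:
        super().__init__(scope, id)

        peering = CfnVPCPeeringConnection(self,
                                          'VpcPeering',
                                          vpc_id=vpc.vpc_id,
                                          peer_vpc_id=peer_vpc.vpc_id)

        # Route traffic in both directions through the peering connection.
        for idx, (source, target) in enumerate([(vpc, peer_vpc), (peer_vpc, vpc)]):
            for n, subnet in enumerate(source.private_subnets):
                CfnRoute(self,
                         'PeeringRoute-%d-%d' % (idx, n),
                         route_table_id=subnet.route_table.route_table_id,
                         destination_cidr_block=target.vpc_cidr_block,
                         vpc_peering_connection_id=peering.ref)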