Example #1
 def add_uploads_policy(bucket: aws_s3.Bucket, role: aws_iam.Role):
     policy = aws_iam.PolicyStatement(
         effect=aws_iam.Effect.ALLOW,
         actions=["s3:PutObject"],
         resources=[f"arn:aws:s3:::{bucket.bucket_name}/uploads/*"],
     )
     role.add_to_policy(policy)
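
A minimal usage sketch, assuming a CDK v1 app; the stack, bucket, and role names are hypothetical:

    from aws_cdk import aws_iam, aws_s3, core

    class UploadsStack(core.Stack):
        def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
            super().__init__(scope, id, **kwargs)
            bucket = aws_s3.Bucket(self, "UploadsBucket")
            role = aws_iam.Role(
                self, "UploaderRole",
                assumed_by=aws_iam.ServicePrincipal("lambda.amazonaws.com"))
            # Grants s3:PutObject on the uploads/ prefix only.
            add_uploads_policy(bucket, role)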
Example #2
    def attach_iam_policies_to_role(cls, role: Role):
        """
        Attach the necessary policies to read secrets from SSM and SecretsManager

        :param role:
        :param zone_id:
        :return:
        """
        # TODO: Extract this in a managed policy
        route53_policy = PolicyStatement(
            resources=["*"],
            effect=Effect.ALLOW,
            actions=[
                "route53:ListHostedZones",
                "route53:ListResourceRecordSets",
            ],
        )
        route53_recordset_policy = PolicyStatement(
            # TODO: restrict to the hosted zone(s) of interest
            resources=["arn:aws:route53:::hostedzone/*"],
            effect=Effect.ALLOW,
            actions=[
                "route53:ChangeResourceRecordSets",
                "route53:ListTagsForResource",
            ],
        )
        role.add_to_policy(route53_policy)
        role.add_to_policy(route53_recordset_policy)
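
The TODO comment above flags that the record-set statement should be scoped down; a hedged sketch of that restriction, assuming the hosted zone ID is available (zone_id is hypothetical):

    # Hypothetical: scope the record-set actions to a single hosted zone.
    def restricted_recordset_policy(zone_id: str) -> PolicyStatement:
        return PolicyStatement(
            effect=Effect.ALLOW,
            actions=[
                "route53:ChangeResourceRecordSets",
                "route53:ListTagsForResource",
            ],
            resources=[f"arn:aws:route53:::hostedzone/{zone_id}"],
        )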
Example #3
    def attach_iam_policies_to_role(cls, role: Role):
        """
        Attach the inline policies necessary to manage autoscaling using the kubernetes cluster autoscaler

        :param role:
        :return:
        """
        # TODO: Extract this in a managed policy
        policies: Dict[str, PolicyStatement] = {
            'cluster_autoscaler': PolicyStatement(
                resources=["*"],
                effect=Effect.ALLOW,
                actions=[
                    "autoscaling:DescribeAutoScalingGroups",
                    "autoscaling:DescribeAutoScalingInstances",
                    "autoscaling:DescribeLaunchConfigurations",
                    "autoscaling:DescribeTags",
                    "autoscaling:SetDesiredCapacity",
                    "autoscaling:TerminateInstanceInAutoScalingGroup",
                    "ec2:DescribeLaunchTemplateVersions",
                ],
            ),
        }

        for policy in policies.values():
            role.add_to_policy(policy)
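
The TODO above suggests extracting the statements into a managed policy; a sketch of that refactor under the same imports (the scope argument and construct ID are illustrative):

    # Hypothetical refactor: hoist the inline statements into one shared
    # ManagedPolicy and attach it, instead of looping add_to_policy.
    shared_policy = ManagedPolicy(
        scope, "ClusterAutoscalerPolicy",
        statements=list(policies.values()))
    role.add_managed_policy(shared_policy)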
Example #4
    def _create_lambdas(self):
        clean_pycache()

        for root, dirs, files in os.walk(LAMBDAS_DIR):
            for f in files:
                if f != "__init__.py":
                    continue

                parent_folder = os.path.basename(os.path.dirname(root))
                lambda_folder = os.path.basename(root)
                name = f"{parent_folder}-{lambda_folder}"
                lambda_config = self.lambdas_config[name]

                layers = []
                for layer_name in lambda_config["layers"]:
                    layers.append(self.layers[layer_name])

                lambda_role = Role(
                    self,
                    f"{name}_role",
                    assumed_by=ServicePrincipal(service="lambda.amazonaws.com")
                )
                for policy in lambda_config["policies"]:
                    lambda_role.add_to_policy(policy)
                lambda_role.add_managed_policy(
                    ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole"))

                lambda_args = {
                    "code": Code.from_asset(root),
                    "handler": "__init__.handle",
                    "runtime": Runtime.PYTHON_3_8,
                    "layers": layers,
                    "function_name": name,
                    "environment": lambda_config["variables"],
                    "role": lambda_role,
                    "timeout": Duration.seconds(lambda_config["timeout"]),
                    "memory_size": lambda_config["memory"],
                }
                if "concurrent_executions" in lambda_config:
                    lambda_args["reserved_concurrent_executions"] = lambda_config["concurrent_executions"]

                self.lambdas[name] = Function(self, name, **lambda_args)

        self.lambdas["sqs_handlers-post_anime"].add_event_source(SqsEventSource(self.post_anime_queue))

        Rule(
            self,
            "titles_updater",
            schedule=Schedule.cron(hour="2", minute="10"),
            targets=[LambdaFunction(self.lambdas["crons-titles_updater"])]
        )
        Rule(
            self,
            "episodes_updater",
            schedule=Schedule.cron(hour="4", minute="10"),
            targets=[LambdaFunction(self.lambdas["crons-episodes_updater"])]
        )
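
The loop reads a fixed set of keys from self.lambdas_config; a hypothetical entry, inferred from the lookups above:

    # Hypothetical config entry; every key is one the method actually reads.
    lambdas_config = {
        "crons-titles_updater": {
            "layers": ["common"],                   # names resolved via self.layers
            "policies": [],                         # iam.PolicyStatement objects
            "variables": {"TABLE_NAME": "anime"},   # Lambda environment
            "timeout": 30,                          # seconds
            "memory": 128,                          # MB
            # "concurrent_executions": 1,           # optional reserved concurrency
        },
    }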
Example #5
 def add_user_specific_policy(bucket: aws_s3.Bucket, role: aws_iam.Role, prefix: str):
     policy = aws_iam.PolicyStatement(
         effect=aws_iam.Effect.ALLOW,
         actions=["s3:GetObject", "s3:PutObject", "s3:DeleteObject"],
         resources=[
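             # Doubled braces emit a literal ${cognito-identity.amazonaws.com:sub},
             # an IAM policy variable resolved per caller at evaluation time.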
             f"arn:aws:s3:::{bucket.bucket_name}/{prefix}/${{cognito-identity.amazonaws.com:sub}}/*"  # noqa: E501
         ],
     )
     role.add_to_policy(policy)
Example #6
 def add_public_policy(bucket: aws_s3.Bucket, role: aws_iam.Role, is_auth_role: bool):
     actions = ["s3:GetObject"]
     if is_auth_role:
         actions.extend(["s3:PutObject", "s3:DeleteObject"])
     policy = aws_iam.PolicyStatement(
         effect=aws_iam.Effect.ALLOW,
         actions=actions,
         resources=[f"arn:aws:s3:::{bucket.bucket_name}/public/*"],
     )
     role.add_to_policy(policy)
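
A usage sketch pairing the helper with a Cognito identity pool's two roles (the role variables are hypothetical):

    # Authenticated users get read/write on public/*; guests read-only.
    add_public_policy(bucket, auth_role, is_auth_role=True)
    add_public_policy(bucket, unauth_role, is_auth_role=False)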
Example #7
    def _configure_mutual_assume_role(self, role: iam.Role):
        # These statements carry principals, so they belong in each role's
        # trust (assume-role) policy; an identity policy attached via
        # add_to_policy must not name principals.
        self._roles.instance_role.assume_role_policy.add_statements(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                principals=[iam.ArnPrincipal(role.role_arn)],
                                actions=['sts:AssumeRole']))

        role.assume_role_policy.add_statements(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                principals=[
                                    iam.ArnPrincipal(
                                        self._roles.instance_role.role_arn)
                                ],
                                actions=['sts:AssumeRole']))
Example #8
    def add_list_policy(bucket: aws_s3.Bucket, role: aws_iam.Role,
                        is_auth_role: bool):
        policy = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=["s3:ListBucket"],
            resources=[f"arn:aws:s3:::{bucket.bucket_name}"],
        )

        prefixes = ["public/", "public/*", "protected/", "protected/*"]
        if is_auth_role:
            prefixes.extend([
                "private/${cognito-identity.amazonaws.com:sub}/",
                "private/${cognito-identity.amazonaws.com:sub}/*",
            ])
        policy.add_conditions({"StringLike": {"s3:prefix": prefixes}})
        role.add_to_policy(policy)
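
Note that s3:ListBucket targets the bucket ARN itself and is narrowed with an s3:prefix condition, unlike the object-level actions in the earlier examples, which target object ARNs. A hypothetical pairing with an identity pool's roles:

    # Guests may list only public/ and protected/ keys; authenticated users
    # may also list their own private/<identity-id>/ keys.
    add_list_policy(bucket, unauth_role, is_auth_role=False)
    add_list_policy(bucket, auth_role, is_auth_role=True)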
Example #9
    def _create_lambdas(self):
        for root, dirs, files in os.walk(LAMBDAS_DIR):
            for f in files:
                if f != "__init__.py":
                    continue

                parent_folder = os.path.basename(os.path.dirname(root))
                lambda_folder = os.path.basename(root)
                name = f"{parent_folder}-{lambda_folder}"
                lambda_config = self.lambdas_config[name]

                layers = []
                for layer_name in lambda_config["layers"]:
                    layers.append(self.layers[layer_name])

                lambda_role = Role(self,
                                   f"{name}_role",
                                   assumed_by=ServicePrincipal(
                                       service="lambda.amazonaws.com"))
                for policy in lambda_config["policies"]:
                    lambda_role.add_to_policy(policy)
                lambda_role.add_managed_policy(
                    ManagedPolicy.from_aws_managed_policy_name(
                        "service-role/AWSLambdaBasicExecutionRole"))

                self.lambdas[name] = Function(
                    self,
                    name,
                    code=Code.from_asset(root),
                    handler="__init__.handle",
                    runtime=Runtime.PYTHON_3_8,
                    layers=layers,
                    function_name=name,
                    environment=lambda_config["variables"],
                    role=lambda_role,
                    timeout=Duration.seconds(lambda_config["timeout"]),
                    memory_size=lambda_config["memory"],
                )

        Rule(self,
             "update_eps",
             schedule=Schedule.cron(hour="2", minute="10"),
             targets=[LambdaFunction(self.lambdas["cron-update_eps"])])
Example #10
    def create_default_infrastructure_config(
            self, construct_id: str) -> CfnInfrastructureConfiguration:
        """
        Create the default infrastructure config, which defines the permissions needed by Image Builder during
        image creation.
        """
        image_builder_role_name = f"DeadlineMachineImageBuilderRole{construct_id}"
        image_builder_role = Role(
            self,
            image_builder_role_name,
            assumed_by=ServicePrincipal("ec2.amazonaws.com"),
            role_name=image_builder_role_name)
        image_builder_role.add_managed_policy(
            ManagedPolicy.from_aws_managed_policy_name(
                'EC2InstanceProfileForImageBuilder'))
        image_builder_role.add_managed_policy(
            ManagedPolicy.from_aws_managed_policy_name(
                'AmazonSSMManagedInstanceCore'))

        image_builder_role.add_to_policy(
            PolicyStatement(
                actions=[
                    's3:Get*',
                    's3:List*',
                ],
                resources=['arn:aws:s3:::thinkbox-installers/*']))

        image_builder_profile_name = f"DeadlineMachineImageBuilderPolicy{construct_id}"
        image_builder_profile = CfnInstanceProfile(
            self,
            image_builder_profile_name,
            instance_profile_name=image_builder_profile_name,
            roles=[image_builder_role_name])
        image_builder_profile.add_depends_on(
            image_builder_role.node.default_child)

        infrastructure_configuration = CfnInfrastructureConfiguration(
            self,
            f"InfrastructureConfig{construct_id}",
            name=f"DeadlineInfrastructureConfig{construct_id}",
            instance_profile_name=image_builder_profile_name)
        infrastructure_configuration.add_depends_on(image_builder_profile)

        return infrastructure_configuration
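
A hedged sketch of one consumer of the returned configuration: an Image Builder pipeline referencing it by ARN (the recipe ARN and construct IDs are placeholders):

    # Hypothetical, inside the same construct:
    from aws_cdk.aws_imagebuilder import CfnImagePipeline

    infra_config = self.create_default_infrastructure_config("Demo")
    CfnImagePipeline(
        self,
        "DeadlineImagePipeline",
        name="DeadlineImagePipeline",
        image_recipe_arn="arn:aws:imagebuilder:us-west-2:111122223333:image-recipe/example/1.0.0",
        infrastructure_configuration_arn=infra_config.attr_arn)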
Example #11
    def attach_iam_policies_to_role(cls, role: Role):
        """
        Attach the necessary policies to read secrets from SSM and SecretsManager

        :param role:
        :return:
        """
        # TODO: Extract this in a managed policy
        secretsmanager_readonly_policy = PolicyStatement(
            resources=["*"],
            effect=Effect.ALLOW,
            actions=[
                "secretsmanager:GetResourcePolicy",
                "secretsmanager:GetSecretValue",
                "secretsmanager:DescribeSecret",
                "secretsmanager:ListSecretVersionIds",
            ]
        )
        role.add_to_policy(secretsmanager_readonly_policy)
        role.add_managed_policy(ManagedPolicy.from_aws_managed_policy_name('AmazonSSMReadOnlyAccess'))
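
A hedged sketch of what these grants permit at runtime inside the workload (client calls and names are illustrative):

    import boto3

    # Allowed by the inline SecretsManager read-only statement above.
    secret = boto3.client("secretsmanager").get_secret_value(
        SecretId="my-app/database")  # hypothetical secret name
    # Allowed by the AmazonSSMReadOnlyAccess managed policy.
    param = boto3.client("ssm").get_parameter(Name="/my-app/config")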
Example #12
    def __init__(self, scope: core.Construct, id: str, prefix: str,
                 source_bucket: s3.Bucket, dest_bucket: s3.Bucket,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        suffix = Fn.select(
            4, Fn.split('-', Fn.select(2, Fn.split('/', self.stack_id))))
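        # The stack ID has the shape
        #   arn:aws:cloudformation:<region>:<account>:stack/<name>/<guid>,
        # so the inner split('/')[2] picks out the GUID and the outer
        # split('-')[4] keeps its last segment, e.g. '0a1b2c3d4e5f' for a
        # hypothetical GUID '9e6c8f20-0f1a-11ec-9d3e-0a1b2c3d4e5f'.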

        # KMS key for Kinesis Data Streams
        self.__kms_key = Key(scope=self,
                             id='kms-kinesis',
                             alias='custom/kinesis',
                             description='KMS key for Kinesis Data Streams',
                             enable_key_rotation=True)

        # Create Kinesis streams
        self.__sale_stream = Stream(scope=self,
                                    id="saleStream",
                                    stream_name="ara-web-sale",
                                    encryption_key=self.__kms_key)
        self.__address_stream = Stream(scope=self,
                                       id="addressStream",
                                       stream_name="ara-web-customer-address",
                                       encryption_key=self.__kms_key)
        self.__customer_stream = Stream(scope=self,
                                        id="customerStream",
                                        stream_name="ara-web-customer",
                                        encryption_key=self.__kms_key)

        # Role for the KDA service
        kda_role = Role(scope=self,
                        id='KinesisAnalyticsRole',
                        assumed_by=ServicePrincipal(
                            service='kinesisanalytics.amazonaws.com'))

        # Grant read on Kinesis streams
        self.__customer_stream.grant_read(kda_role)
        self.__address_stream.grant_read(kda_role)
        self.__sale_stream.grant_read(kda_role)

        # Grant read on source bucket (reference data)
        source_bucket.grant_read(kda_role)
        # Grant write on destination bucket
        dest_bucket.grant_write(kda_role)

        kda_role.add_to_policy(
            PolicyStatement(actions=['kinesis:ListShards'],
                            resources=[
                                self.__customer_stream.stream_arn,
                                self.__address_stream.stream_arn,
                                self.__sale_stream.stream_arn
                            ]))

        # Create Elasticsearch domain
        # TODO: use VPC subnets
        es_domain = EsDomain(scope=self,
                             id='EsDomain',
                             application_prefix=prefix,
                             suffix=suffix,
                             kda_role=kda_role)

        # Create the KDA application after the Elasticsearch service
        kda_app = KdaApplication(scope=self,
                                 id='KdaApplication',
                                 es_domain=es_domain.es_domain,
                                 kda_role=kda_role,
                                 source_bucket=source_bucket,
                                 dest_bucket=dest_bucket)

        core.Tags.of(self).add('module-name', 'streaming')
Example #13
    def __init__(self, scope: core.Construct, id: str, es_domain: CfnDomain, kda_role: iam.Role,
                 source_bucket: s3.Bucket, dest_bucket: s3.Bucket, **kwargs):
        super().__init__(scope, id, **kwargs)

        stack = Stack.of(self)

        kda_role.add_to_policy(PolicyStatement(actions=['cloudwatch:PutMetricData'],
                                               resources=['*']))

        artifacts_bucket_arn = 'arn:aws:s3:::' + _config.ARA_BUCKET.replace("s3://", "")
        kda_role.add_to_policy(PolicyStatement(actions=['s3:GetObject', 's3:GetObjectVersion'],
                                               resources=[artifacts_bucket_arn, artifacts_bucket_arn + '/binaries/*']))
        log_group = logs.LogGroup(scope=self,
                                  id='KdaLogGroup',
                                  retention=logs.RetentionDays.ONE_WEEK,
                                  removal_policy=RemovalPolicy.DESTROY)

        log_stream = logs.LogStream(scope=self,
                                    id='KdaLogStream',
                                    log_group=log_group,
                                    removal_policy=RemovalPolicy.DESTROY)

        log_stream_arn = stack.format_arn(service='logs',
                                          resource='log-group',
                                          resource_name=log_group.log_group_name + ':log-stream:' +
                                                        log_stream.log_stream_name,
                                          sep=':')

        # TODO: restrict
        kda_role.add_to_policy(PolicyStatement(actions=['logs:*'],
                                               resources=[stack.format_arn(service='logs', resource='*')]))

        kda_role.add_to_policy(PolicyStatement(actions=['logs:DescribeLogStreams', 'logs:DescribeLogGroups'],
                                               resources=[log_group.log_group_arn,
                                                          stack.format_arn(service='logs', resource='log-group',
                                                                           resource_name='*')]))

        kda_role.add_to_policy(PolicyStatement(actions=['logs:PutLogEvents'],
                                               resources=[log_stream_arn]))

        kda_role.add_to_policy(PolicyStatement(actions=['es:ESHttp*'],
                                               resources=[stack.format_arn(service='es', resource='domain',
                                                                           resource_name=es_domain.domain_name + '/*')]))

        # TODO: restrict
        kda_role.add_to_policy(PolicyStatement(actions=['s3:*'],
                                               resources=['arn:aws:s3:::*']))

        # Define delivery stream
        # delivery_stream_name = 'clean_delivery_stream'
        #
        # s3_configuration = {
        #     'bucketArn': '',
        #     'compressionFormat': 'Snappy',
        #     'dataFormatConversionConfiguration': {
        #         'enabled': True,
        #         'inputFormatConfiguration': {'deserializer': },
        #         'outputFormatConfiguration': {'serializer': {'parquetSerDe': }},
        #         'schemaConfiguration': {}
        #     },
        #     'prefix': 'streaming'
        # }
        #
        # delivery_stream = CfnDeliveryStream(scope=self,
        #                                     id='Firehose Delivery Stream',
        #                                     delivery_stream_name=delivery_stream_name,
        #                                     delivery_stream_type='DirectPut',
        #                                     extended_s3_destination_configuration=s3_configuration
        #                                     )

        # Define KDA application
        application_configuration = {
            'environmentProperties': {
                'propertyGroups': [
                    {
                        'propertyGroupId': 'ConsumerConfigProperties',
                        'propertyMap': {
                            'CustomerStream': scope.customer_stream.stream_name,
                            'AddressStream': scope.address_stream.stream_name,
                            'SaleStream': scope.sale_stream.stream_name,
                            'PromoDataPath': source_bucket.s3_url_for_object('promo'),
                            'ItemDataPath': source_bucket.s3_url_for_object('item'),
                            'aws.region': scope.region
                        }
                    },
                    {
                        'propertyGroupId': 'ProducerConfigProperties',
                        'propertyMap': {
                            'ElasticsearchHost': 'https://' + es_domain.attr_domain_endpoint + ':443',
                            'Region': scope.region,
                            'DenormalizedSalesS3Path': dest_bucket.s3_url_for_object() + '/',
                            'IndexName': 'ara-write'
                        }
                    }
                ]
            },
            'applicationCodeConfiguration': {
                'codeContent': {
                    's3ContentLocation': {
                        'bucketArn': artifacts_bucket_arn,
                        'fileKey': 'binaries/stream-processing-1.1.jar'
                    }
                },
                'codeContentType': 'ZIPFILE'
            },
            'flinkApplicationConfiguration': {
                'parallelismConfiguration': {
                    'configurationType': 'DEFAULT'
                },
                'checkpointConfiguration': {
                    'configurationType': 'DEFAULT'
                },
                'monitoringConfiguration': {
                    'logLevel': 'DEBUG',
                    'metricsLevel': 'TASK',
                    'configurationType': 'CUSTOM'
                }
            },
            'applicationSnapshotConfiguration': {
                'snapshotsEnabled': False
            }
        }

        self.__app = CfnApplicationV2(scope=self,
                                      id='KDA application',
                                      runtime_environment='FLINK-1_11',
                                      application_name='KDA-application',
                                      service_execution_role=kda_role.role_arn,
                                      application_configuration=application_configuration)

        logging = CfnApplicationCloudWatchLoggingOptionV2(scope=self, id='KDA application logging',
                                                          application_name=self.__app.ref,
                                                          cloud_watch_logging_option={'logStreamArn': log_stream_arn})

        logging.apply_removal_policy(policy=RemovalPolicy.RETAIN, apply_to_update_replace_policy=True,
                                     default=RemovalPolicy.RETAIN)

        # Use a custom resource to start the application
        create_params = {
            'ApplicationName': self.__app.ref,
            'RunConfiguration': {
                'ApplicationRestoreConfiguration': {
                    'ApplicationRestoreType': 'SKIP_RESTORE_FROM_SNAPSHOT'
                },
                'FlinkRunConfiguration': {
                    'AllowNonRestoredState': True
                }
            }
        }

        # See https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/ for service name, actions and parameters
        create_action = AwsSdkCall(service='KinesisAnalyticsV2',
                                   action='startApplication',
                                   parameters=create_params,
                                   physical_resource_id=PhysicalResourceId.of(self.__app.ref + '-start'))

        delete_action = AwsSdkCall(service='KinesisAnalyticsV2',
                                   action='stopApplication',
                                   parameters={'ApplicationName': self.__app.ref, 'Force': True})
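        # No physical_resource_id on the delete call: onDelete only needs to
        # stop the application, it does not identify a new resource.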

        custom_resource = AwsCustomResource(scope=self,
                                            id='KdaStartAndStop',
                                            on_create=create_action,
                                            on_delete=delete_action,
                                            policy=AwsCustomResourcePolicy.from_statements([PolicyStatement(
                                                actions=['kinesisanalytics:StartApplication',
                                                         'kinesisanalytics:StopApplication',
                                                         'kinesisanalytics:DescribeApplication',
                                                         'kinesisanalytics:UpdateApplication'], resources=[
                                                    stack.format_arn(service='kinesisanalytics', resource='application',
                                                                     resource_name=self.__app.application_name)])]))

        custom_resource.node.add_dependency(self.__app)
Example #14
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        alexa_assets = os.path.dirname(
            os.path.realpath(__file__)) + "/../skill"
        asset = assets.Asset(self, 'SkillAsset', path=alexa_assets)

        # role to access bucket
        role = Role(self,
                    'Role',
                    assumed_by=CompositePrincipal(
                        ServicePrincipal('alexa-appkit.amazon.com'),
                        ServicePrincipal('cloudformation.amazonaws.com')))

        # Allow the skill resource to access the zipped skill package
        role.add_to_policy(
            PolicyStatement(
                actions=['s3:GetObject'],
                resources=[
                    f'arn:aws:s3:::{asset.s3_bucket_name}/{asset.s3_object_key}'
                ]))

        # DynamoDB Table
        users_table = dynamo_db.Table(
            self,
            'Users',
            partition_key=dynamo_db.Attribute(
                name='userId', type=dynamo_db.AttributeType.STRING),
            billing_mode=dynamo_db.BillingMode.PAY_PER_REQUEST,
            removal_policy=core.RemovalPolicy.DESTROY)

        # install node dependencies for lambdas
        lambda_folder = os.path.dirname(
            os.path.realpath(__file__)) + "/../lambda_fns"
        subprocess.check_call("npm i".split(),
                              cwd=lambda_folder,
                              stdout=subprocess.DEVNULL)
        subprocess.check_call("npm run build".split(),
                              cwd=lambda_folder,
                              stdout=subprocess.DEVNULL)

        alexa_lambda = _lambda.Function(
            self,
            "AlexaLambdaHandler",
            runtime=_lambda.Runtime.NODEJS_12_X,
            code=_lambda.Code.from_asset("lambda_fns"),
            handler="lambda.handler",
            environment={"USERS_TABLE": users_table.table_name})

        # grant the lambda role read/write permissions to our table
        users_table.grant_read_write_data(alexa_lambda)

        # create the skill
        skill = alexa_ask.CfnSkill(
            self,
            'the-alexa-skill',
            vendor_id='',
            authentication_configuration={
                'clientId': '',
                'clientSecret': '',
                'refreshToken': ''
            },
            skill_package={
                's3Bucket': asset.s3_bucket_name,
                's3Key': asset.s3_object_key,
                's3BucketRole': role.role_arn,
                'overrides': {
                    'manifest': {
                        'apis': {
                            'custom': {
                                'endpoint': {
                                    'uri': alexa_lambda.function_arn
                                }
                            }
                        }
                    }
                }
            })

        ###
        # Allow the Alexa service to invoke the fulfillment Lambda.
        # In order for the Skill to be created, the fulfillment Lambda
        # must have a permission allowing Alexa to invoke it, this causes
        # a circular dependency and requires the first deploy to allow all
        # Alexa skills to invoke the lambda, subsequent deploys will work
        # when specifying the eventSourceToken
        ###
        alexa_lambda.add_permission(
            'AlexaPermission',
            # eventSourceToken: skill.ref,
            principal=ServicePrincipal('alexa-appkit.amazon.com'),
            action='lambda:InvokeFunction')
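
Per the comment above, a hedged sketch of the tightened permission a later deploy could use once the skill exists:

    # Hypothetical follow-up: restrict invocation to this one skill by
    # passing its ID as the event source token.
    alexa_lambda.add_permission(
        'AlexaPermission',
        principal=ServicePrincipal('alexa-appkit.amazon.com'),
        action='lambda:InvokeFunction',
        event_source_token=skill.ref)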
Example #15
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        db_name = 'petclinic'
        db_cluster = 'petclinic-serverless-graphql'

        petclinic_graphql_api = CfnGraphQLApi(
            self, 'PetClinicApi',
            name='PetClinicApi',
            authentication_type='API_KEY'
        )

        petclinic_graphql_key = CfnApiKey(
            self, 'ItemsApiKey',
            api_id=petclinic_graphql_api.attr_api_id
        )

        with open('./definition/petclinic.graphql', 'rt') as f:
            schema_def = f.read()

        petclinic_schema = CfnGraphQLSchema(
            self, 'PetclinicSchema',
            api_id=petclinic_graphql_api.attr_api_id,
            definition=schema_def
        )

        serverless_rds_secret = Secret(
            self, 'PetclinicRDSSecret',
            generate_secret_string=SecretStringGenerator(
                generate_string_key='password',
                secret_string_template='{"username":"******"}',
                exclude_characters= '"@/',
                password_length=16
            )
        )

        serverless_rds_cluster = CfnDBCluster(
            self, 'PetclinicRDSServerless',
            engine='aurora',
            database_name=db_name,
            db_cluster_identifier=db_cluster,
            engine_mode='serverless',
            master_username=serverless_rds_secret.secret_value_from_json('username').to_string(),
            master_user_password=serverless_rds_secret.secret_value_from_json('password').to_string(),
            scaling_configuration=CfnDBCluster.ScalingConfigurationProperty(
              min_capacity=1,
              max_capacity=2,
              auto_pause=False
            )
        )

        serverless_rds_cluster.apply_removal_policy(core.RemovalPolicy.DESTROY)

        serverless_rds_arn='arn:aws:rds:' + self.region + ':' + self.account + ':cluster:' + db_cluster

        website_bucket = Bucket(self, 'PetclinicWebsite',
            website_index_document='index.html',
            public_read_access=True,
            removal_policy=core.RemovalPolicy.DESTROY
        )

        deployment = BucketDeployment(self, 'PetclinicDeployWebsite',
          sources=[Source.asset('../frontend/public')],
          destination_bucket=website_bucket,
          retain_on_delete=False
          #destination_key_prefix='web/static'
        )

        iam_policy = [
            PolicyStatement(
                actions=["secretsmanager:GetSecretValue"],
                effect=Effect.ALLOW,
                resources=[serverless_rds_secret.secret_arn]
            ),
            PolicyStatement(
                actions=["rds-data:ExecuteStatement",
                         "rds-data:DeleteItems",
                         "rds-data:ExecuteSql",
                         "rds-data:GetItems",
                         "rds-data:InsertItems",
                         "rds-data:UpdateItems"],
                effect=Effect.ALLOW,
                resources=[serverless_rds_arn, serverless_rds_arn + ':*']
            ),
            PolicyStatement(
                actions=["rds:*"],
                effect=Effect.ALLOW,
                resources=[serverless_rds_arn, serverless_rds_arn + ':*']
            ),
            PolicyStatement(
                actions=["s3:PutObject", "s3:PutObjectAcl",
                         "s3:PutObjectVersionAcl", "s3:GetObject"],
                effect=Effect.ALLOW,
                resources=[website_bucket.bucket_arn + "/*"]
            ),
            PolicyStatement(
                actions=["s3:ListBucket"],
                effect=Effect.ALLOW,
                resources=[website_bucket.bucket_arn]
            )]

        init_resource = CustomResource(self, "PetlinicInitCustomResource",
            provider=CustomResourceProvider.lambda_(
                SingletonFunction(
                    self, "CustomResourceSingleton",
                    uuid="f7d4f730-4ee1-11e8-9c2d-fa7ae01bbebc",
                    code=Code.from_asset('./custom-resource-code'),
                    handler="index.handler",
                    timeout=core.Duration.seconds(600),
                    runtime=Runtime.PYTHON_3_7,
                    initial_policy=iam_policy
                )
            ),
            properties={
                "DBClusterIdentifier": db_cluster,
                "DBClusterArn": serverless_rds_arn,
                "DBSecretArn": serverless_rds_secret.secret_arn,
                "DBName": db_name,
                "Bucket": website_bucket.bucket_name,
                "GraphqlApi": petclinic_graphql_api.attr_graph_ql_url,
                "GraphqlKey": petclinic_graphql_key.attr_api_key
            }
        )

        petclinic_rds_role = Role(
            self, 'PetclinicRDSRole',
            assumed_by=ServicePrincipal('appsync.amazonaws.com')
        )

        petclinic_rds_role.add_to_policy(iam_policy[0])
        petclinic_rds_role.add_to_policy(iam_policy[1])

        data_source = CfnDataSource(
            self, 'PetclinicRDSDatesource',
            api_id=petclinic_graphql_api.attr_api_id,
            type='RELATIONAL_DATABASE',
            name='PetclinicRDSDatesource',
            relational_database_config=CfnDataSource.RelationalDatabaseConfigProperty(
                relational_database_source_type='RDS_HTTP_ENDPOINT',
                rds_http_endpoint_config=CfnDataSource.RdsHttpEndpointConfigProperty(
                    aws_region=self.region,
                    aws_secret_store_arn=serverless_rds_secret.secret_arn,
                    database_name='petclinic',
                    db_cluster_identifier=serverless_rds_arn
                )
            ),
            service_role_arn=petclinic_rds_role.role_arn 
        )
        data_source.add_depends_on(petclinic_schema)
        data_source.add_depends_on(serverless_rds_cluster)

        query_req_path = './definition/template/query/request/'
        query_res_path = './definition/template/query/response/'

        for req_file in os.listdir(query_req_path):
            query_name = req_file.split('.')[0]
            with open(query_req_path + req_file, 'rt') as f:
                query_req = f.read()
            with open(query_res_path + query_name + '.vm', 'rt') as f:
                query_res = f.read()
            pettypes_resolver = CfnResolver(
                self, query_name,
                api_id=petclinic_graphql_api.attr_api_id,
                type_name='Query',
                field_name=query_name,
                data_source_name=data_source.name,
                request_mapping_template=query_req,
                response_mapping_template=query_res
            )
            pettypes_resolver.add_depends_on(data_source)

        func_dict = {}

        func_req_path = './definition/template/function/request/'
        func_res_path = './definition/template/function/response/'

        for req_file in os.listdir(func_req_path):
            func_name = req_file.split('.')[0]
            with open(func_req_path + req_file) as f:
                func_req = f.read()
            with open(func_res_path + func_name + '.vm') as f:
                func_res = f.read()
            func_dict[func_name] = CfnFunctionConfiguration(
                self, func_name,
                api_id=petclinic_graphql_api.attr_api_id,
                data_source_name=data_source.name,
                name=func_name,
                function_version='2018-05-29',
                request_mapping_template=func_req,
                response_mapping_template=func_res
            )
            func_dict[func_name].add_depends_on(data_source)

        query_owner = CfnResolver(
            self, 'QueryOwner',
            api_id=petclinic_graphql_api.attr_api_id,
            kind='PIPELINE',
            type_name='Query',
            field_name='owner',
            request_mapping_template="{}",
            response_mapping_template="$util.toJson($ctx.result)",
            pipeline_config=CfnResolver.PipelineConfigProperty(
                functions=[func_dict['Query_Owner_getOwnerById'].attr_function_id,
                 func_dict['Query_Owner_getPetsByOwner'].attr_function_id,
                 func_dict['Query_Owner_getVistsByPet'].attr_function_id]
            )
        )

        query_owner.add_depends_on(func_dict['Query_Owner_getOwnerById'])
        query_owner.add_depends_on(func_dict['Query_Owner_getPetsByOwner'])
        query_owner.add_depends_on(func_dict['Query_Owner_getVistsByPet'])

        query_all_owners = CfnResolver(
            self, 'QueryAllOwners',
            api_id=petclinic_graphql_api.attr_api_id,
            kind='PIPELINE',
            type_name='Query',
            field_name='owners',
            request_mapping_template="{}",
            response_mapping_template="$util.toJson($ctx.result)",
            pipeline_config=CfnResolver.PipelineConfigProperty(
                functions=[func_dict['Query_Owners_getAllOwners'].attr_function_id,
                 func_dict['Query_Owners_getPetsByOwner'].attr_function_id]
            )
        )

        query_all_owners.add_depends_on(func_dict['Query_Owners_getAllOwners'])
        query_all_owners.add_depends_on(func_dict['Query_Owners_getPetsByOwner'])

        query_pet = CfnResolver(
            self, 'QueryPet',
            api_id=petclinic_graphql_api.attr_api_id,
            kind='PIPELINE',
            type_name='Query',
            field_name='pet',
            request_mapping_template="{}",
            response_mapping_template="$util.toJson($ctx.result)",
            pipeline_config=CfnResolver.PipelineConfigProperty(
                functions=[func_dict['Query_Pet_getPetById'].attr_function_id,
                 func_dict['Query_Pet_getVisitByPet'].attr_function_id]
            )
        )

        query_pet.add_depends_on(func_dict['Query_Pet_getPetById'])
        query_pet.add_depends_on(func_dict['Query_Pet_getVisitByPet'])   

        query_vets = CfnResolver(
            self, 'QueryVets',
            api_id=petclinic_graphql_api.attr_api_id,
            kind='PIPELINE',
            type_name='Query',
            field_name='vets',
            request_mapping_template="{}",
            response_mapping_template="$util.toJson($ctx.result)",
            pipeline_config=CfnResolver.PipelineConfigProperty(
                functions=[func_dict['Query_Vets_getVets'].attr_function_id,
                 func_dict['Query_Vets_getSpecByVets'].attr_function_id]
            )
        )

        query_vets.add_depends_on(func_dict['Query_Vets_getVets'])
        query_vets.add_depends_on(func_dict['Query_Vets_getSpecByVets'])

        mutation_req_path = './definition/template/mutation/request/'
        mutation_res_path = './definition/template/mutation/response/'

        for req_file in os.listdir(mutation_req_path):
            mutation_name = req_file.split('.')[0]
            with open(mutation_req_path + req_file) as f:
                func_req = f.read()
            with open(mutation_res_path + mutation_name + '.vm') as f:
                func_res = f.read()
            mutation = CfnResolver(
                self, mutation_name,
                api_id=petclinic_graphql_api.attr_api_id,
                type_name='Mutation',
                field_name=mutation_name,
                data_source_name=data_source.name,
                request_mapping_template=func_req,
                response_mapping_template=func_res
            )
            mutation.add_depends_on(data_source)

        core.CfnOutput(
            self,
            "GraphqlPetclinicWebsiteUrl",
            export_name="GraphqlPetclinicWebsiteUrl",
            value=website_bucket.bucket_website_url)