Example #1
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        with open(INPUT_WORKFLOW_FILE) as f:
            workflow_dict = json.load(f)
            glue_role = Role(
                self,
                "dummy_glue_job_role",
                assumed_by=ServicePrincipal("glue.amazonaws.com"),
                managed_policies=[
                    ManagedPolicy.from_aws_managed_policy_name(
                        "service-role/AWSGlueServiceRole")
                ],
            )
            # mock job
            job_command = CfnJob.JobCommandProperty(
                name='pythonshell', script_location=SCRIPT_LOCATION)

            for workflow_item in workflow_dict:
                for node in workflow_item['nodes']:
                    triggered_job_name = node['id']
                    job = CfnJob(self,
                                 triggered_job_name,
                                 name=triggered_job_name,
                                 command=job_command,
                                 role=glue_role.role_name)
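Like the other snippets on this page, the example above omits its imports. A minimal sketch of what it appears to assume (CDK v1 module paths; INPUT_WORKFLOW_FILE and SCRIPT_LOCATION are placeholder constants, not the original values):

import json

from aws_cdk import core
from aws_cdk.aws_glue import CfnJob
from aws_cdk.aws_iam import ManagedPolicy, Role, ServicePrincipal

# Placeholder values; the original constants are defined elsewhere in that project.
INPUT_WORKFLOW_FILE = "workflow.json"              # JSON describing the workflow nodes
SCRIPT_LOCATION = "s3://some-bucket/dummy_job.py"  # Python shell script for the mock job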
Example #2
    def __init__(self, scope: BaseApp, id: str, vpc: Vpc, env_fqdn: str,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        kubernetes_version = scope.environment_config.get(
            'eks', {}).get('kubernetesVersion')
        cluster_name = scope.prefixed_str(
            scope.environment_config.get('eks', {}).get('clusterName'))

        cluster_admin_role = Role(
            self,
            scope.prefixed_str('EKS-AdminRole'),
            role_name=scope.prefixed_str('EKS-AdminRole'),
            assumed_by=AccountRootPrincipal(),
        )

        self.__cluster = eks_cluster = Cluster(
            self,
            cluster_name,
            cluster_name=cluster_name,
            vpc=vpc,
            version=KubernetesVersion.of(kubernetes_version),
            default_capacity=0,  # We define the capacity later
            masters_role=cluster_admin_role,
            vpc_subnets=self._get_control_plane_subnets(
                scope),  # Control plane subnets
        )

        for profile in scope.environment_config.get('eks', {}).get(
                'fargateProfiles', []):
            eks_cluster.add_fargate_profile(
                profile.get('name'),
                selectors=[
                    Selector(namespace=profile.get('namespace'),
                             labels=profile.get('labels'))
                ])

        asg_fleets = []
        for fleet in scope.environment_config.get('eks',
                                                  {}).get('workerNodesFleets'):
            if fleet.get('type') == 'managed':
                self.add_managed_fleet(eks_cluster, fleet)
            if fleet.get('type') == 'ASG':
                asg_fleets += self.add_asg_fleet(scope, eks_cluster, fleet)

        self._enable_cross_fleet_communication(asg_fleets)

        # Base cluster applications
        MetricsServer.add_to_cluster(eks_cluster)
        ClusterAutoscaler.add_to_cluster(eks_cluster, kubernetes_version)
        ExternalSecrets.add_to_cluster(eks_cluster)
        CertManager.add_to_cluster(eks_cluster)

        # Monitoring applications
        PrometheusOperator.add_to_cluster(eks_cluster)
        Grafana.add_to_cluster(eks_cluster, env_fqdn)

        # Logging & tracing applications
        Fluentd.add_to_cluster(eks_cluster)
        Loki.add_to_cluster(eks_cluster)
Example #3
    def __role(self) -> Role:
        """
        A role for the custom resource which manages git commits to CodeCommit.

        :return: Custom resource's role.
        """
        return Role(
            self.__stack,
            self.__prefix + 'CiCdLambdaCustomCommitRole',
            inline_policies={
                self.__prefix + 'CiCdLambdaCustomCommitPolicy':
                PolicyDocument(statements=[
                    PolicyStatement(
                        actions=[
                            "codecommit:CreateCommit",
                        ],
                        resources=[self.__code_repository.repository_arn],
                        effect=Effect.ALLOW),
                    PolicyStatement(actions=[
                        "logs:CreateLogGroup", "logs:CreateLogStream",
                        "logs:PutLogEvents"
                    ],
                                    resources=['*'],
                                    effect=Effect.ALLOW)
                ])
            },
            assumed_by=ServicePrincipal('lambda.amazonaws.com'))
Example #4
    def _create_s3_access_role(self, identity_pool: CfnIdentityPool,
                               s3_bucket: Bucket) -> Role:
        role = Role(self,
                    'DemoRole',
                    role_name='CognitoDemoBucketAccess',
                    assumed_by=WebIdentityPrincipal(
                        'cognito-identity.amazonaws.com',
                        conditions={
                            'StringEquals': {
                                'cognito-identity.amazonaws.com:aud':
                                identity_pool.ref
                            }
                        }),
                    inline_policies={
                        'ListBucket':
                        PolicyDocument(statements=[
                            PolicyStatement(effect=Effect.ALLOW,
                                            actions=['s3:ListBucket'],
                                            resources=[s3_bucket.bucket_arn])
                        ])
                    })

        CfnOutput(self, 'ROLE_ARN', value=role.role_arn)

        return role
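The role in Example #4 only establishes trust with the identity pool; to hand it to authenticated identities, the stack would typically also create a role attachment. A rough sketch, assuming CfnIdentityPoolRoleAttachment is imported from aws_cdk.aws_cognito and this code runs in the same stack as the method above:

        # Sketch only (not part of the original example): map the demo role to
        # authenticated identities in the pool.
        CfnIdentityPoolRoleAttachment(
            self,
            'DemoIdentityPoolRoleAttachment',
            identity_pool_id=identity_pool.ref,
            roles={'authenticated': role.role_arn})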
Example #5
    def _create_lambdas(self):
        clean_pycache()

        for root, dirs, files in os.walk(LAMBDAS_DIR):
            for f in files:
                if f != "__init__.py":
                    continue

                parent_folder = os.path.basename(os.path.dirname(root))
                lambda_folder = os.path.basename(root)
                name = f"{parent_folder}-{lambda_folder}"
                lambda_config = self.lambdas_config[name]

                layers = []
                for layer_name in lambda_config["layers"]:
                    layers.append(self.layers[layer_name])

                lambda_role = Role(
                    self,
                    f"{name}_role",
                    assumed_by=ServicePrincipal(service="lambda.amazonaws.com")
                )
                for policy in lambda_config["policies"]:
                    lambda_role.add_to_policy(policy)
                lambda_role.add_managed_policy(
                    ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole"))

                lambda_args = {
                    "code": Code.from_asset(root),
                    "handler": "__init__.handle",
                    "runtime": Runtime.PYTHON_3_8,
                    "layers": layers,
                    "function_name": name,
                    "environment": lambda_config["variables"],
                    "role": lambda_role,
                    "timeout": Duration.seconds(lambda_config["timeout"]),
                    "memory_size": lambda_config["memory"],
                }
                if "concurrent_executions" in lambda_config:
                    lambda_args["reserved_concurrent_executions"] = lambda_config["concurrent_executions"]

                self.lambdas[name] = Function(self, name, **lambda_args)

        self.lambdas["sqs_handlers-post_anime"].add_event_source(SqsEventSource(self.post_anime_queue))

        Rule(
            self,
            "titles_updater",
            schedule=Schedule.cron(hour="2", minute="10"),
            targets=[LambdaFunction(self.lambdas["crons-titles_updater"])]
        )
        Rule(
            self,
            "episodes_updater",
            schedule=Schedule.cron(hour="4", minute="10"),
            targets=[LambdaFunction(self.lambdas["crons-episodes_updater"])]
        )
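The loop above drives everything from self.lambdas_config, which is not shown in the snippet. Judging by the keys it reads, one entry would look roughly like the following hypothetical illustration (only the key names come from the code; the values are invented):

        # Hypothetical shape of one self.lambdas_config entry, inferred from the
        # keys the loop reads above; the real values live elsewhere in the project.
        self.lambdas_config = {
            "crons-titles_updater": {
                "layers": ["common"],                  # keys into self.layers
                "policies": [],                        # iam.PolicyStatement objects
                "variables": {"TABLE_NAME": "anime"},  # Lambda environment variables
                "timeout": 60,                         # seconds
                "memory": 256,                         # MB
                # "concurrent_executions": 2,          # optional reserved concurrency
            },
        }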
Example #6
    def __init__(self, scope: Construct, _id: str, **kwargs) -> None:
        super().__init__(scope, _id, **kwargs)
        task_definition = FargateTaskDefinition(
            self,
            'TaskDefinition',
            cpu=256,
            memory_limit_mib=512,
            execution_role=Role(
                self,
                'ExecutionRole',
                assumed_by=cast(IPrincipal, ServicePrincipal('ecs-tasks.amazonaws.com'))
            ),
            task_role=Role(
                self,
                'TaskRole',
                assumed_by=cast(IPrincipal, ServicePrincipal('ecs-tasks.amazonaws.com')),
                managed_policies=[
                    ManagedPolicy.from_aws_managed_policy_name('AmazonSESFullAccess')
                ]
            )
        )
        task_definition.add_container(
            'Container',
            image=ContainerImage.from_asset(
                getcwd(),
                file='Dockerfile',
                repository_name='jqbx-bot',
                exclude=['cdk.out']
            ),
            command=['pipenv', 'run', 'python', '-u', '-m', 'src.main'],
            environment={
                'SPOTIFY_USER_ID': environ.get('SPOTIFY_USER_ID'),
                'JQBX_ROOM_ID': environ.get('JQBX_ROOM_ID'),
                'JQBX_BOT_DISPLAY_NAME': environ.get('JQBX_BOT_DISPLAY_NAME'),
                'JQBX_BOT_IMAGE_URL': environ.get('JQBX_BOT_IMAGE_URL'),
                'DATA_SERVICE_BASE_URL': environ.get('DATA_SERVICE_BASE_URL')
            },
            logging=AwsLogDriver(
                stream_prefix='jqbx-bot',
                log_group=LogGroup(self, 'LogGroup')
            )
        )
        cluster = Cluster(self, '%sCluster' % _id)
        FargateService(self, '%sService' % _id, cluster=cluster, task_definition=task_definition, desired_count=1)
Example #7
    def create_role(self):
        service_principal = ServicePrincipal('lambda.amazonaws.com')

        return Role(
            self,
            'Role',
            assumed_by=service_principal,
            role_name='sfn_lambda_role',
            managed_policies=[self.managed_policy],
        )
Example #8
    def _create_lambdas(self):
        for root, dirs, files in os.walk(LAMBDAS_DIR):
            for f in files:
                if f != "__init__.py":
                    continue

                parent_folder = os.path.basename(os.path.dirname(root))
                lambda_folder = os.path.basename(root)
                name = f"{parent_folder}-{lambda_folder}"
                lambda_config = self.lambdas_config[name]

                layers = []
                for layer_name in lambda_config["layers"]:
                    layers.append(self.layers[layer_name])

                lambda_role = Role(self,
                                   f"{name}_role",
                                   assumed_by=ServicePrincipal(
                                       service="lambda.amazonaws.com"))
                for policy in lambda_config["policies"]:
                    lambda_role.add_to_policy(policy)
                lambda_role.add_managed_policy(
                    ManagedPolicy.from_aws_managed_policy_name(
                        "service-role/AWSLambdaBasicExecutionRole"))

                self.lambdas[name] = Function(
                    self,
                    name,
                    code=Code.from_asset(root),
                    handler="__init__.handle",
                    runtime=Runtime.PYTHON_3_8,
                    layers=layers,
                    function_name=name,
                    environment=lambda_config["variables"],
                    role=lambda_role,
                    timeout=Duration.seconds(lambda_config["timeout"]),
                    memory_size=lambda_config["memory"],
                )

        Rule(self,
             "update_eps",
             schedule=Schedule.cron(hour="2", minute="10"),
             targets=[LambdaFunction(self.lambdas["cron-update_eps"])])
Example #9
    def create_default_infrastructure_config(
            self, construct_id: str) -> CfnInfrastructureConfiguration:
        """
        Create the default infrastructure config, which defines the permissions needed by Image Builder during
        image creation.
        """
        image_builder_role_name = f"DeadlineMachineImageBuilderRole{construct_id}"
        image_builder_role = Role(
            self,
            image_builder_role_name,
            assumed_by=ServicePrincipal("ec2.amazonaws.com"),
            role_name=image_builder_role_name)
        image_builder_role.add_managed_policy(
            ManagedPolicy.from_aws_managed_policy_name(
                'EC2InstanceProfileForImageBuilder'))
        image_builder_role.add_managed_policy(
            ManagedPolicy.from_aws_managed_policy_name(
                'AmazonSSMManagedInstanceCore'))

        image_builder_role.add_to_policy(
            PolicyStatement(actions=[
                's3:Get*',
                's3:List*',
            ],
                            resources=['arn:aws:s3:::thinkbox-installers/*']))

        image_builder_profile_name = f"DeadlineMachineImageBuilderPolicy{construct_id}"
        image_builder_profile = CfnInstanceProfile(
            self,
            image_builder_profile_name,
            instance_profile_name=image_builder_profile_name,
            roles=[image_builder_role_name])
        image_builder_profile.add_depends_on(
            image_builder_role.node.default_child)

        infrastructure_configuration = CfnInfrastructureConfiguration(
            self,
            f"InfrastructureConfig{construct_id}",
            name=f"DeadlineInfrastructureConfig{construct_id}",
            instance_profile_name=image_builder_profile_name)
        infrastructure_configuration.add_depends_on(image_builder_profile)

        return infrastructure_configuration
Example #10
    def role(self) -> Role:
        inline_policies = {
            "ElasticsearchIndexPrividerFunctionElasticsearchAccessPolicy":
            PolicyDocument(statements=[
                PolicyStatement(
                    # TODO restrict this to appropriate API calls.
                    actions=["es:*"],
                    resources=["*"],
                )
            ]),
        }

        return Role(
            scope=self.__scope,
            id=f"{self.__name}ElasticsearchIndexResourceProviderRole",
            assumed_by=ServicePrincipal("lambda.amazonaws.com"),
            description="A role for the ElasticsearchIndexResourceProvider Lambda function.",
            inline_policies=inline_policies,
        )
Example #11
    def __init__(self, scope: Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        global g_lambda_edge

        Tags.of(self).add("Stack", "Common-Lambda-Edge")

        self._role = Role(
            self,
            "EdgeLambdaRole",
            assumed_by=CompositePrincipal(
                ServicePrincipal("lambda.amazonaws.com"),
                ServicePrincipal("edgelambda.amazonaws.com"),
            ),
            managed_policies=[
                ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AWSLambdaBasicExecutionRole"),
            ],
        )

        if g_lambda_edge is not None:
            raise Exception("Only a single LambdaEdgeStack instance can exist")
        g_lambda_edge = self
Example #12
    def __init__(self, scope: core.Construct, id: str, prefix: str,
                 source_bucket: s3.Bucket, dest_bucket: s3.Bucket,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        suffix = Fn.select(
            4, Fn.split('-', Fn.select(2, Fn.split('/', self.stack_id))))

        # KMS key for Kinesis Data Streams
        self.__kms_key = Key(scope=self,
                             id='kms-kinesis',
                             alias='custom/kinesis',
                             description='KMS key for Kinesis Data Streams',
                             enable_key_rotation=True)

        # Create Kinesis streams
        self.__sale_stream = Stream(scope=self,
                                    id="saleStream",
                                    stream_name="ara-web-sale",
                                    encryption_key=self.__kms_key)
        self.__address_stream = Stream(scope=self,
                                       id="addressStream",
                                       stream_name="ara-web-customer-address",
                                       encryption_key=self.__kms_key)
        self.__customer_stream = Stream(scope=self,
                                        id="customerStream",
                                        stream_name="ara-web-customer",
                                        encryption_key=self.__kms_key)

        # Role for the KDA service
        kda_role = Role(scope=self,
                        id='KinesisAnalyticsRole',
                        assumed_by=ServicePrincipal(
                            service='kinesisanalytics.amazonaws.com'))

        # Grant read on Kinesis streams
        self.__customer_stream.grant_read(kda_role)
        self.__address_stream.grant_read(kda_role)
        self.__sale_stream.grant_read(kda_role)

        # Grant read on source bucket (reference data)
        source_bucket.grant_read(kda_role)
        # Grant write on destination bucket
        dest_bucket.grant_write(kda_role)

        kda_role.add_to_policy(
            PolicyStatement(actions=['kinesis:ListShards'],
                            resources=[
                                self.__customer_stream.stream_arn,
                                self.__address_stream.stream_arn,
                                self.__sale_stream.stream_arn
                            ]))

        # Create Elasticsearch domain
        # TODO: use VPC subnets
        es_domain = EsDomain(scope=self,
                             id='EsDomain',
                             application_prefix=prefix,
                             suffix=suffix,
                             kda_role=kda_role)

        # Create the KDA application after the Elasticsearch service
        kda_app = KdaApplication(scope=self,
                                 id='KdaApplication',
                                 es_domain=es_domain.es_domain,
                                 kda_role=kda_role,
                                 source_bucket=source_bucket,
                                 dest_bucket=dest_bucket)

        core.Tags.of(self).add('module-name', 'streaming')
Example #13
    def __init__(
            self,
            stack: core.Stack,
            prefix: str,
            code_repository: Repository,
            task_definition: str,
            app_spec: str,
            ecs_application: EcsApplication,
            main_listener: CfnListener,
            deployments_listener: CfnListener,
            production_target_group,
            deployment_target_group,
            ecs_cluster: Cluster,
    ) -> None:
        """
        Constructor.

        :param stack: A CloudFormation stack to which this resource is added.
        :param prefix: Prefix for resource names.
        :param code_repository: A CodeCommit git repository to which configuration files for the ECS deployment are pushed.
        :param task_definition: A document which describes the ECS task to be deployed.
        :param app_spec: A document which describes how the ECS deployment should behave.
        :param ecs_application: An ECS application for which the deployments are being made.
        :param main_listener: The load balancer's main listener for production traffic.
        :param deployments_listener: The load balancer's test listener for test traffic.
        :param production_target_group: A target group for production traffic.
        :param deployment_target_group: A target group for test deployment traffic.
        :param ecs_cluster: An ECS cluster in which our ECS application is located.
        """
        self.__stack = stack
        self.__prefix = prefix
        self.__code_repository = code_repository
        self.__task_definition = task_definition
        self.__app_spec = app_spec
        self.__ecs_application = ecs_application
        self.__main_listener = main_listener
        self.__deployments_listener = deployments_listener
        self.__production_target_group = production_target_group
        self.__deployment_target_group = deployment_target_group
        self.__ecs_cluster = ecs_cluster

        self.__custom_resource_role = Role(
            self.__stack,
            self.__prefix + 'CustomFargateDeploymentGroupRole',
            inline_policies={
                self.__prefix + 'CustomFargateDeploymentGroupPolicy': PolicyDocument(
                    statements=[
                        PolicyStatement(
                            actions=[
                                "codedeploy:GetDeploymentGroup",
                                "codedeploy:CreateDeploymentGroup",
                                "codedeploy:DeleteDeploymentGroup",
                                "codedeploy:UpdateDeploymentGroup",
                            ],
                            resources=['*'],
                            effect=Effect.ALLOW
                        ),
                        PolicyStatement(
                            actions=[
                                "logs:CreateLogGroup",
                                "logs:CreateLogStream",
                                "logs:PutLogEvents"
                            ],
                            resources=['*'],
                            effect=Effect.ALLOW
                        )
                    ]
                )},
            assumed_by=ServicePrincipal('lambda.amazonaws.com')
        )

        self.__deployment_group_role = Role(
            self.__stack,
            self.__prefix + 'FargateDeploymentGroupRole',
            path='/',
            inline_policies={
                self.__prefix + 'FargateDeploymentGroupPolicy': PolicyDocument(
                    statements=[
                        PolicyStatement(
                            actions=[
                                "ecs:DescribeServices",
                                "ecs:CreateTaskSet",
                                "ecs:UpdateServicePrimaryTaskSet",
                                "ecs:DeleteTaskSet",
                                "elasticloadbalancing:DescribeTargetGroups",
                                "elasticloadbalancing:DescribeListeners",
                                "elasticloadbalancing:ModifyListener",
                                "elasticloadbalancing:DescribeRules",
                                "elasticloadbalancing:ModifyRule",
                                "lambda:InvokeFunction",
                                "cloudwatch:DescribeAlarms",
                                "sns:Publish",
                                "s3:GetObject",
                                "s3:GetObjectMetadata",
                                "s3:GetObjectVersion",
                                "iam:PassRole"
                            ],
                            resources=['*'],
                            effect=Effect.ALLOW
                        )
                    ]
                )
            },
            assumed_by=CompositePrincipal(
                ServicePrincipal('ecs-tasks.amazonaws.com'),
                ServicePrincipal('codedeploy.amazonaws.com')
            )
        )
Example #14
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        graphql_api = CfnGraphQLApi(self,
                                    'WeatherApi',
                                    name='weather-api',
                                    authentication_type='API_KEY')

        CfnApiKey(self, 'WeatherApiKey', api_id=graphql_api.attr_api_id)

        api_schema = CfnGraphQLSchema(self,
                                      'WeatherSchema',
                                      api_id=graphql_api.attr_api_id,
                                      definition="""
                type Destination {
                    id: ID!
                    description: String!
                    state: String!
                    city: String!
                    zip: String!
                    conditions: Weather!
                }
                        
                type Mutation {
                    addDestination(
                        id: ID,
                        description: String!,
                        state: String!,
                        city: String!,
                        zip: String!
                    ): Destination!
                }
                        
                type Query {
                    getWeather(city: String!): Weather
                    # Get a single value of type 'Post' by primary key.
                    getDestination(id: ID!, zip: String): Destination
                    getAllDestinations: [Destination]
                    getDestinationsByState(state: String!): [Destination]
                }
                        
                type Subscription {
                    newDestination: Destination
                        @aws_subscribe(mutations: ["addDestination"])
                }
                        
                type Weather {
                    description: String
                    current: String
                    maxTemp: String
                    minTemp: String
                }
                        
                schema {
                    query: Query
                    mutation: Mutation
                    subscription: Subscription
                }
            """)

        table_name = 'destinations'

        table = Table(self,
                      'DestinationsTable',
                      table_name=table_name,
                      partition_key=Attribute(
                          name="id",
                          type=AttributeType.STRING,
                      ),
                      billing_mode=BillingMode.PAY_PER_REQUEST,
                      stream=StreamViewType.NEW_IMAGE)

        table_role = Role(self,
                          'DestinationsDynamoDBRole',
                          assumed_by=ServicePrincipal('appsync.amazonaws.com'))

        table_role.add_managed_policy(
            ManagedPolicy.from_aws_managed_policy_name(
                'AmazonDynamoDBFullAccess'))

        data_source = CfnDataSource(
            self,
            'DestinationsDataSource',
            api_id=graphql_api.attr_api_id,
            name='DestinationsDynamoDataSource',
            type='AMAZON_DYNAMODB',
            dynamo_db_config=CfnDataSource.DynamoDBConfigProperty(
                table_name=table.table_name, aws_region=self.region),
            service_role_arn=table_role.role_arn)

        lambdaFn = Function(self,
                            "GetWeather",
                            code=Code.asset(os.getcwd() + "/lambdas/weather/"),
                            handler="weather.get",
                            timeout=core.Duration.seconds(900),
                            memory_size=128,
                            runtime=Runtime.NODEJS_10_X,
                            environment={'APPID': os.getenv('APPID')})

        lambda_role = Role(
            self,
            'WeatherLambdaRole',
            assumed_by=ServicePrincipal('appsync.amazonaws.com'))

        lambda_role.add_managed_policy(
            ManagedPolicy.from_aws_managed_policy_name('AWSLambdaFullAccess'))

        lambda_source = CfnDataSource(
            self,
            'WeatherDataSource',
            api_id=graphql_api.attr_api_id,
            name='WeatherCondition',
            type='AWS_LAMBDA',
            lambda_config=CfnDataSource.LambdaConfigProperty(
                lambda_function_arn=lambdaFn.function_arn),
            service_role_arn=lambda_role.role_arn)

        self.add_resolvers(graphql_api,
                           api_schema,
                           data_source=data_source,
                           lambda_source=lambda_source)
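The self.add_resolvers helper called above is defined elsewhere in that project. By analogy with the explicit CfnResolver wiring in Example #16, a hypothetical sketch of one resolver it might create for the getWeather query field (the template below is a guess, not the original code):

    def add_resolvers(self, graphql_api, api_schema, data_source, lambda_source):
        # Hypothetical sketch: wire the getWeather query field to the Lambda data source.
        get_weather_resolver = CfnResolver(
            self,
            'GetWeatherResolver',
            api_id=graphql_api.attr_api_id,
            type_name='Query',
            field_name='getWeather',
            data_source_name=lambda_source.name,
            request_mapping_template="""
            {
                "version": "2017-02-28",
                "operation": "Invoke",
                "payload": { "city": $util.toJson($ctx.args.city) }
            }""",
            response_mapping_template='$util.toJson($ctx.result)')
        get_weather_resolver.add_depends_on(api_schema)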
Example #15
    def __init__(self,
                 scope: Construct,
                 id: str,
                 elasticsearch_index: ElasticsearchIndexResource,
                 dynamodb_table: Table,
                 kms_key: Optional[Key] = None,
                 *,
                 sagemaker_endpoint_name: str = None,
                 sagemaker_endpoint_arn: str = None,
                 sagemaker_embeddings_key: str = None) -> None:
        super().__init__(scope=scope, id=id)

        elasticsearch_layer = BElasticsearchLayer(
            scope=self, name=f"{id}ElasticsearchLayer")

        if bool(sagemaker_endpoint_name) ^ bool(sagemaker_embeddings_key):
            raise ValueError(
                'In order to use sentence embedding, all of the following environment variables are required: '
                'SAGEMAKER_ENDPOINT_NAME, SAGEMAKER_EMBEDDINGS_KEY. '
                'Otherwise, provide none of them.')

        if sagemaker_endpoint_name and not sagemaker_endpoint_arn:
            sagemaker_endpoint_arn = self.__resolve_sagemaker_endpoints_arn(
                '*')

        optional_sagemaker_parameters = {
            'SAGEMAKER_ENDPOINT_NAME': sagemaker_endpoint_name or None,
            'SAGEMAKER_EMBEDDINGS_KEY': sagemaker_embeddings_key or None
        }

        initial_cloner_function = SingletonFunction(
            scope=self,
            id='InitialClonerFunction',
            uuid='e01116a4-f939-43f2-8f5b-cc9f862c9e01',
            lambda_purpose='InitialClonerSingletonLambda',
            code=Code.from_asset(initial_cloner_root),
            handler='index.handler',
            runtime=Runtime.PYTHON_3_8,
            layers=[elasticsearch_layer],
            log_retention=RetentionDays.ONE_MONTH,
            memory_size=128,
            timeout=Duration.minutes(15),
            role=Role(
                scope=self,
                id='InitialClonerFunctionRole',
                assumed_by=ServicePrincipal('lambda.amazonaws.com'),
                inline_policies={
                    'LogsPolicy':
                    PolicyDocument(statements=[
                        PolicyStatement(
                            actions=[
                                'logs:CreateLogGroup',
                                'logs:CreateLogStream',
                                'logs:PutLogEvents',
                                'logs:DescribeLogStreams',
                            ],
                            resources=['arn:aws:logs:*:*:*'],
                            effect=Effect.ALLOW,
                        )
                    ]),
                    'ElasticsearchPolicy':
                    PolicyDocument(statements=[
                        PolicyStatement(
                            actions=[
                                'es:ESHttpDelete',
                                'es:ESHttpGet',
                                'es:ESHttpHead',
                                'es:ESHttpPatch',
                                'es:ESHttpPost',
                                'es:ESHttpPut',
                            ],
                            resources=['*'],
                            effect=Effect.ALLOW,
                        )
                    ]),
                    'DynamodbPolicy':
                    PolicyDocument(statements=[
                        PolicyStatement(
                            actions=['dynamodb:*'],
                            resources=['*'],
                            effect=Effect.ALLOW,
                        )
                    ]),
                },
                description='Role for DynamoDB Initial Cloner Function',
            ),
        )

        if kms_key:
            initial_cloner_function.add_to_role_policy(
                PolicyStatement(
                    actions=['kms:Decrypt'],
                    resources=[kms_key.key_arn],
                    effect=Effect.ALLOW,
                ), )

        initial_cloner = CustomResource(
            scope=self,
            id='InitialCloner',
            service_token=initial_cloner_function.function_arn,
            removal_policy=RemovalPolicy.DESTROY,
            properties={
                'DynamodbTableName':
                dynamodb_table.table_name,
                'ElasticsearchIndexName':
                elasticsearch_index.index_name,
                'ElasticsearchEndpoint':
                elasticsearch_index.elasticsearch_domain.domain_endpoint,
            },
            resource_type='Custom::ElasticsearchInitialCloner',
        )

        primary_key_field = initial_cloner.get_att_string('PrimaryKeyField')

        dynamodb_stream_arn = dynamodb_table.table_stream_arn
        if not dynamodb_stream_arn:
            raise Exception('DynamoDB streams must be enabled for the table')

        dynamodb_event_source = DynamoEventSource(
            table=dynamodb_table,
            starting_position=StartingPosition.LATEST,
            enabled=True,
            max_batching_window=Duration.seconds(10),
            bisect_batch_on_error=True,
            parallelization_factor=2,
            batch_size=1000,
            retry_attempts=10,
        )

        cloner_inline_policies = {
            'LogsPolicy':
            PolicyDocument(statements=[
                PolicyStatement(
                    actions=[
                        'logs:CreateLogGroup',
                        'logs:CreateLogStream',
                        'logs:PutLogEvents',
                        'logs:DescribeLogStreams',
                    ],
                    resources=['arn:aws:logs:*:*:*'],
                    effect=Effect.ALLOW,
                )
            ]),
            'ElasticsearchPolicy':
            PolicyDocument(statements=[
                PolicyStatement(
                    actions=[
                        'es:ESHttpDelete',
                        'es:ESHttpGet',
                        'es:ESHttpHead',
                        'es:ESHttpPatch',
                        'es:ESHttpPost',
                        'es:ESHttpPut',
                    ],
                    resources=[
                        f'{elasticsearch_index.elasticsearch_domain.domain_arn}/*'
                    ],
                    effect=Effect.ALLOW,
                )
            ]),
            'DynamodbStreamsPolicy':
            PolicyDocument(statements=[
                PolicyStatement(
                    actions=[
                        'dynamodb:DescribeStream',
                        'dynamodb:GetRecords',
                        'dynamodb:GetShardIterator',
                        'dynamodb:ListStreams',
                    ],
                    resources=[dynamodb_stream_arn],
                    effect=Effect.ALLOW,
                )
            ]),
        }

        if sagemaker_endpoint_arn:
            cloner_inline_policies['SagemakerPolicy'] = PolicyDocument(
                statements=[
                    PolicyStatement(actions=['sagemaker:InvokeEndpoint'],
                                    resources=[sagemaker_endpoint_arn],
                                    effect=Effect.ALLOW)
                ])

        cloner_function = Function(
            scope=self,
            id='ClonerFunction',
            code=Code.from_asset(cloner_root),
            handler='index.handler',
            runtime=Runtime.PYTHON_3_8,
            environment={
                'ES_INDEX_NAME': elasticsearch_index.index_name,
                'ES_DOMAIN_ENDPOINT':
                elasticsearch_index.elasticsearch_domain.domain_endpoint,
                'PRIMARY_KEY_FIELD': primary_key_field,
                **{
                    k: v
                    for k, v in optional_sagemaker_parameters.items()
                    if all(optional_sagemaker_parameters.values())
                }
            },
            events=[dynamodb_event_source],
            layers=[elasticsearch_layer],
            log_retention=RetentionDays.ONE_MONTH,
            memory_size=128,
            role=Role(
                scope=self,
                id='ClonerFunctionRole',
                assumed_by=ServicePrincipal('lambda.amazonaws.com'),
                inline_policies=cloner_inline_policies,
                description='Role for DynamoDB Cloner Function',
            ),
            timeout=Duration.seconds(30),
        )

        if kms_key:
            cloner_function.add_to_role_policy(
                PolicyStatement(
                    actions=['kms:Decrypt'],
                    resources=[kms_key.key_arn],
                    effect=Effect.ALLOW,
                ))
Example #16
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        table_name = 'items'

        items_graphql_api = CfnGraphQLApi(self,
                                          'ItemsApi',
                                          name='items-api',
                                          authentication_type='API_KEY')

        CfnApiKey(self, 'ItemsApiKey', api_id=items_graphql_api.attr_api_id)

        api_schema = CfnGraphQLSchema(self,
                                      'ItemsSchema',
                                      api_id=items_graphql_api.attr_api_id,
                                      definition=f"""\
                type {table_name} {{
                    {table_name}Id: ID!
                    name: String
                }}
                type Paginated{table_name} {{
                    items: [{table_name}!]!
                    nextToken: String
                }}
                type Query {{
                    all(limit: Int, nextToken: String): Paginated{table_name}!
                    getOne({table_name}Id: ID!): {table_name}
                }}
                type Mutation {{
                    save(name: String!): {table_name}
                    delete({table_name}Id: ID!): {table_name}
                }}
                type Schema {{
                    query: Query
                    mutation: Mutation
                }}""")

        items_table = Table(
            self,
            'ItemsTable',
            table_name=table_name,
            partition_key=Attribute(name=f'{table_name}Id',
                                    type=AttributeType.STRING),
            billing_mode=BillingMode.PAY_PER_REQUEST,
            stream=StreamViewType.NEW_IMAGE,

            # The default removal policy is RETAIN, which means that cdk
            # destroy will not attempt to delete the new table, and it will
            # remain in your account until manually deleted. By setting the
            # policy to DESTROY, cdk destroy will delete the table (even if it
            # has data in it)
            removal_policy=core.RemovalPolicy.DESTROY  # NOT recommended for production code
        )

        items_table_role = Role(
            self,
            'ItemsDynamoDBRole',
            assumed_by=ServicePrincipal('appsync.amazonaws.com'))

        items_table_role.add_managed_policy(
            ManagedPolicy.from_aws_managed_policy_name(
                'AmazonDynamoDBFullAccess'))

        data_source = CfnDataSource(
            self,
            'ItemsDataSource',
            api_id=items_graphql_api.attr_api_id,
            name='ItemsDynamoDataSource',
            type='AMAZON_DYNAMODB',
            dynamo_db_config=CfnDataSource.DynamoDBConfigProperty(
                table_name=items_table.table_name, aws_region=self.region),
            service_role_arn=items_table_role.role_arn)

        get_one_resolver = CfnResolver(
            self,
            'GetOneQueryResolver',
            api_id=items_graphql_api.attr_api_id,
            type_name='Query',
            field_name='getOne',
            data_source_name=data_source.name,
            request_mapping_template=f"""\
            {{
                "version": "2017-02-28",
                "operation": "GetItem",
                "key": {{
                "{table_name}Id": $util.dynamodb.toDynamoDBJson($ctx.args.{table_name}Id)
                }}
            }}""",
            response_mapping_template="$util.toJson($ctx.result)")

        get_one_resolver.add_depends_on(api_schema)

        get_all_resolver = CfnResolver(
            self,
            'GetAllQueryResolver',
            api_id=items_graphql_api.attr_api_id,
            type_name='Query',
            field_name='all',
            data_source_name=data_source.name,
            request_mapping_template=f"""\
            {{
                "version": "2017-02-28",
                "operation": "Scan",
                "limit": $util.defaultIfNull($ctx.args.limit, 20),
                "nextToken": $util.toJson($util.defaultIfNullOrEmpty($ctx.args.nextToken, null))
            }}""",
            response_mapping_template="$util.toJson($ctx.result)")

        get_all_resolver.add_depends_on(api_schema)

        save_resolver = CfnResolver(
            self,
            'SaveMutationResolver',
            api_id=items_graphql_api.attr_api_id,
            type_name='Mutation',
            field_name='save',
            data_source_name=data_source.name,
            request_mapping_template=f"""\
            {{
                "version": "2017-02-28",
                "operation": "PutItem",
                "key": {{
                    "{table_name}Id": {{ "S": "$util.autoId()" }}
                }},
                "attributeValues": {{
                    "name": $util.dynamodb.toDynamoDBJson($ctx.args.name)
                }}
            }}""",
            response_mapping_template="$util.toJson($ctx.result)")

        save_resolver.add_depends_on(api_schema)

        delete_resolver = CfnResolver(
            self,
            'DeleteMutationResolver',
            api_id=items_graphql_api.attr_api_id,
            type_name='Mutation',
            field_name='delete',
            data_source_name=data_source.name,
            request_mapping_template=f"""\
            {{
                "version": "2017-02-28",
                "operation": "DeleteItem",
                "key": {{
                "{table_name}Id": $util.dynamodb.toDynamoDBJson($ctx.args.{table_name}Id)
                }}
            }}""",
            response_mapping_template="$util.toJson($ctx.result)")

        delete_resolver.add_depends_on(api_schema)
Example #17
    def __init__(self, scope: cdk.Construct, construct_id: str, config, vpc: IVpc, instance: IInstance, neo4j_user_secret: ISecret, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        bucket = Bucket(self, "s3-bucket-altimeter", 
            bucket_name=config["s3_bucket"],
            encryption=BucketEncryption.UNENCRYPTED, #.S3_MANAGED, # Disable encryption since it's not really required and it conflicts with SCP guardrails set by Control Tower on the Audit account.
            block_public_access=BlockPublicAccess.BLOCK_ALL
        )

        cluster = Cluster(self, "ecs-cluster-altimeter", 
            cluster_name="ecsclstr-altimeter--default",
            vpc=vpc               
        )

        task_role = Role(self, "iam-role-altimeter-task-role",
            assumed_by=ServicePrincipal("ecs-tasks.amazonaws.com"),
            # It appears that within the account where the scanner is running, the task role is (partially) used for scanning resources (rather than the altimeter-scanner-access role).      
            managed_policies=[
                ManagedPolicy.from_aws_managed_policy_name('SecurityAudit'),
                ManagedPolicy.from_aws_managed_policy_name('job-function/ViewOnlyAccess')
            ]
        )

        task_definition = FargateTaskDefinition(self, "ecs-fgtd-altimeter",
            task_role=task_role,
            memory_limit_mib=self.MEMORY_LIMIT,
            cpu=self.CPU
        )

        docker_path = os.path.join(os.path.curdir,"..")

        image_asset = DockerImageAsset(self, 'ecr-assets-dia-altimeter', 
            directory=docker_path,
            file="scanner.Dockerfile"
        )            

        task_definition.add_container("ecs-container-altimeter",            
            image= ContainerImage.from_docker_image_asset(image_asset),
            # memory_limit_mib=self.MEMORY_LIMIT,
            # cpu=self.CPU,
            environment= {
                "CONFIG_PATH": config["altimeter_config_path"],
                "S3_BUCKET": config["s3_bucket"]
            },
            logging= AwsLogDriver(
                stream_prefix= 'altimeter',
                log_retention= RetentionDays.TWO_WEEKS
            )
        )

        task_definition.add_to_task_role_policy(PolicyStatement(
            resources=["arn:aws:iam::*:role/"+config["account_execution_role"]],
            actions=['sts:AssumeRole']
        ))

        task_definition.add_to_task_role_policy(PolicyStatement(
            resources=[
                "arn:aws:s3:::"+config["s3_bucket"],
                "arn:aws:s3:::"+config["s3_bucket"]+"/*"
            ],
            actions=["s3:GetObject*",
                "s3:GetBucket*",
                "s3:List*",
                "s3:DeleteObject*",
                "s3:PutObject",
                "s3:Abort*",
                "s3:PutObjectTagging"]
        ))

        # Grant the ability to record the stdout to CloudWatch Logs
        # TODO: Refine
        task_definition.add_to_task_role_policy(PolicyStatement(
            resources=["*"],
            actions=['logs:*']
        ))

        # Trigger task every 24 hours
        Rule(self, "events-rule-altimeter-daily-scan",
            rule_name="evrule--altimeter-daily-scan",
            schedule=Schedule.cron(hour="0", minute="0"),
            description="Daily altimeter scan",
            targets=[EcsTask(
                task_definition=task_definition,
                cluster=cluster,
                subnet_selection=SubnetSelection(subnet_type=SubnetType.PRIVATE)
            )]
        )

        # Trigger task manually via event
        Rule(self, "events-rule-altimeter-manual-scan",
            rule_name="evrule--altimeter-manual-scan",
            event_pattern=EventPattern(source=['altimeter']), 
            description="Manual altimeter scan",
            targets=[EcsTask(
                task_definition=task_definition,
                cluster=cluster,
                subnet_selection=SubnetSelection(subnet_type=SubnetType.PRIVATE)
            )]
        )        


        # Don't put Neo4j Importer lambda in a separate stack since it causes a circular reference with the S3 event source, and using an imported bucket as event source is not possible (you need a Bucket, not an IBucket)



        neo4j_importer_function = PythonFunction(self, 'lambda-function-neo4j-importer',
            function_name="function-altimeter--neo4j-importer",             
            entry="../neo4j-importer",
            index="app.py",
            handler="lambda_handler",
            runtime=Runtime.PYTHON_3_8,
            memory_size=256,
            timeout=cdk.Duration.seconds(60),
            vpc=vpc,
            vpc_subnets=SubnetSelection(subnets=vpc.select_subnets(subnet_group_name='Private').subnets),
            environment={
                "neo4j_address": instance.instance_private_ip,
                "neo4j_user_secret_name": neo4j_user_secret.secret_name
            }
        )

        neo4j_importer_function.add_event_source(
            S3EventSource(bucket,
                events= [EventType.OBJECT_CREATED, EventType.OBJECT_REMOVED],
                filters= [ { "prefix": "raw/", "suffix": ".rdf"}]
            )
        )

        # Grant lambda read/write access to the S3 bucket for reading raw rdf, writing prepared rdf and generating signed uri
        bucket.grant_read_write(neo4j_importer_function.role)
        # Grant lambda read access to the neo4j user secret
        neo4j_user_secret.grant_read(neo4j_importer_function.role)
Example #18
    def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # The code that defines your stack goes here
        table_name = "trainers"

        trainers_graphql_api = CfnGraphQLApi(
            self,'trainersApi',
            name="trainers-api",
            authentication_type='API_KEY'
        )
        
        CfnApiKey(
            self, 'TrainersApiKey',
            api_id=trainers_graphql_api.attr_api_id
        )

        api_schema = CfnGraphQLSchema(
            self,"TrainersSchema",
            api_id = trainers_graphql_api.attr_api_id,
            definition=data_schema
        )

        trainers_table = Table(
            self, 'TrainersTable',
            table_name=table_name,
            partition_key=Attribute(
                name='id',
                type=AttributeType.STRING,
            ),
            billing_mode=BillingMode.PAY_PER_REQUEST,
            stream=StreamViewType.NEW_IMAGE,

            # The default removal policy is RETAIN, which means that cdk
            # destroy will not attempt to delete the new table, and it will
            # remain in your account until manually deleted. By setting the
            # policy to DESTROY, cdk destroy will delete the table (even if it
            # has data in it)
            removal_policy=core.RemovalPolicy.DESTROY  # NOT recommended for production code
        )

        trainers_table_role = Role(
            self, 'TrainersDynamoDBRole',
            assumed_by=ServicePrincipal('appsync.amazonaws.com')
        )

        trainers_table_role.add_managed_policy(
            ManagedPolicy.from_aws_managed_policy_name(
                'AmazonDynamoDBFullAccess'
            )
        )

        data_source = CfnDataSource(
            self, 'TrainersDataSource',
            api_id=trainers_graphql_api.attr_api_id,
            name='TrainersDynamoDataSource',
            type='AMAZON_DYNAMODB',
            dynamo_db_config=CfnDataSource.DynamoDBConfigProperty(
                table_name=trainers_table.table_name,
                aws_region=self.region
            ),
            service_role_arn=trainers_table_role.role_arn
        )

        get_Trainer_resolver = CfnResolver(
            self, 'GetOneQueryResolver',
            api_id=trainers_graphql_api.attr_api_id,
            type_name='Query',
            field_name='getTrainer',
            data_source_name=data_source.name,
            request_mapping_template=get_trainer,
            response_mapping_template="$util.toJson($ctx.result)"
        )

        get_Trainer_resolver.add_depends_on(api_schema)

        get_all_trainers_resolver = CfnResolver(
            self, 'GetAllQueryResolver',
            api_id=trainers_graphql_api.attr_api_id,
            type_name='Query',
            field_name='allTrainers',
            data_source_name=data_source.name,
            request_mapping_template=all_trainers,
            response_mapping_template="$util.toJson($ctx.result)"
        )

        get_all_trainers_resolver.add_depends_on(api_schema)
     
        create_trainers_resolver = CfnResolver(
            self, 'CreateTrainerMutationResolver',
            api_id=trainers_graphql_api.attr_api_id,
            type_name='Mutation',
            field_name='createTrainer',
            data_source_name=data_source.name,
            request_mapping_template=create_trainer,
            response_mapping_template="$util.toJson($ctx.result)"
        )

        create_trainers_resolver.add_depends_on(api_schema)

        update_trainers_resolver = CfnResolver(
            self,'UpdateMutationResolver',
            api_id=trainers_graphql_api.attr_api_id,
            type_name="Mutation",
            field_name="updateTrainers",
            data_source_name=data_source.name,
            request_mapping_template=update_trainer,
            response_mapping_template="$util.toJson($ctx.result)"
        )
        update_trainers_resolver.add_depends_on(api_schema)

        delete_trainer_resolver = CfnResolver(
            self, 'DeleteMutationResolver',
            api_id=trainers_graphql_api.attr_api_id,
            type_name='Mutation',
            field_name='deleteTrainer',
            data_source_name=data_source.name,
            request_mapping_template=delete_trainer,
            response_mapping_template="$util.toJson($ctx.result)"
        )
        delete_trainer_resolver.add_depends_on(api_schema)
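Example #18 imports its schema and VTL mapping templates (data_schema, get_trainer, all_trainers, create_trainer, update_trainer, delete_trainer) from elsewhere in the project. Judging by the inline templates in Example #16 and the table's 'id' partition key, a template such as get_trainer is presumably a string along these lines (illustrative guess only):

        # Illustrative guess at the contents of get_trainer, modeled on the
        # inline request mapping templates in Example #16; not the original code.
        get_trainer = """
        {
            "version": "2017-02-28",
            "operation": "GetItem",
            "key": {
                "id": $util.dynamodb.toDynamoDBJson($ctx.args.id)
            }
        }"""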
Example #19
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # s3
        s3_bucket_name = "{}-s3-{}".format(Constant.PROJECT_NAME,
                                           self._get_UUID(4))
        _s3.Bucket(
            self,
            id=s3_bucket_name,
            bucket_name=s3_bucket_name,
            removal_policy=core.RemovalPolicy.DESTROY,  # TODO: destroy for test
            # removal_policy=core.RemovalPolicy.RETAIN
        )

        # step 1. VPC
        # To deploy into an existing VPC, use the following line instead and pass in the vpc_id
        # vpc = ec2.Vpc.from_lookup(self, "VPC", vpc_id='')
        vpc = ec2.Vpc(
            self,
            "VPC",
            max_azs=2,  # two AZs, with one subnet created in each
            cidr="10.10.0.0/16",
            # configuration will create 3 groups in 2 AZs = 6 subnets.
            subnet_configuration=[
                ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PUBLIC,
                                        name="Public",
                                        cidr_mask=24),
                ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PRIVATE,
                                        name="Private",
                                        cidr_mask=24),
                # ec2.SubnetConfiguration(
                #     subnet_type=ec2.SubnetType.ISOLATED,
                #     name="DB",
                #     cidr_mask=24
                # )
            ],
            # nat_gateway_provider=ec2.NatProvider.gateway(),
            # nat_gateways=2,
        )

        # ES needs to be deployed into the private subnets
        selection = vpc.select_subnets(subnet_type=ec2.SubnetType.PRIVATE)

        # step 2. iam_instance_profile needed to access S3 and the ES cluster
        #  action -> statement -> policy -> role -> instance profile ->  attach ec2

        actions = [
            "ec2:CreateNetworkInterface", "ec2:DeleteNetworkInterface",
            "ec2:DescribeNetworkInterfaces",
            "ec2:ModifyNetworkInterfaceAttribute",
            "ec2:DescribeSecurityGroups", "ec2:DescribeSubnets",
            "ec2:DescribeVpcs", "s3:*"
        ]

        policyStatement = PolicyStatement(actions=actions, effect=Effect.ALLOW)
        policyStatement.add_all_resources()
        policyStatement.sid = "Stmt1480452973134"

        policy_name = "{}-ec2-es-policy".format(Constant.PROJECT_NAME)
        ec2_policy = Policy(self, policy_name, policy_name=policy_name)

        ec2_policy.add_statements(policyStatement)

        role_name = "{}-ec2-es-role".format(Constant.PROJECT_NAME)
        access_es_role = Role(
            self,
            role_name,
            role_name=role_name,
            assumed_by=ServicePrincipal('ec2.amazonaws.com.cn'))

        ec2_policy.attach_to_role(access_es_role)

        profile_name = "{}-ec2-es-profile".format(Constant.PROJECT_NAME)
        instance_profile = CfnInstanceProfile(
            self,
            profile_name,
            instance_profile_name=profile_name,
            roles=[access_es_role.role_name])

        # step 4. ES

        # In production, restrict the security group to accept only port 443 requests from within the VPC
        sg_es_cluster_name = "{}-sg-es".format(Constant.PROJECT_NAME)
        sg_es_cluster = ec2.SecurityGroup(
            self,
            id=sg_es_cluster_name,
            vpc=vpc,
            security_group_name=sg_es_cluster_name)

        sg_es_cluster.add_ingress_rule(peer=ec2.Peer.ipv4(vpc.vpc_cidr_block),
                                       connection=ec2.Port.tcp(443))

        es_name = Constant.PROJECT_NAME
        es_arn = self.format_arn(service="es",
                                 resource="domain",
                                 sep="/",
                                 resource_name=es_name)
        es = elasticsearch.CfnDomain(
            self,
            es_name,
            elasticsearch_version='7.1',
            domain_name=es_name,
            node_to_node_encryption_options={"enabled": False},
            vpc_options={
                "securityGroupIds": [sg_es_cluster.security_group_id
                                     ],  # in production, only accept port 443 requests from within the VPC
                # Multiple nodes would need multiple subnets; this test runs a single ES node, so only one subnet is used
                "subnetIds": selection.subnet_ids[:1]
            },
            ebs_options={
                "ebsEnabled": True,
                "volumeSize": 10,
                "volumeType": "gp2"
            },
            elasticsearch_cluster_config={
                # In production, enable three dedicated master nodes:
                # "dedicatedMasterCount": 3,
                # "dedicatedMasterEnabled": True,
                # "dedicatedMasterType": 'm4.large.elasticsearch',
                "instanceCount": 1,
                "instanceType": 'm4.large.elasticsearch',
                "zoneAwarenessEnabled": False
            })
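        # The resource policy attached below is open (Principal "*"); network access is still
        # restricted to the VPC by the security group and subnet placement above.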
        es.access_policies = {
            "Version":
            "2012-10-17",
            "Statement": [{
                "Effect": "Allow",
                "Principal": {
                    "AWS": "*"
                },
                "Action": "es:*",
                "Resource": "{}/*".format(es_arn)
            }]
        }

        # step 5.  SNS
        topic = sns.Topic(self, "topic")
        topic.add_subscription(subs.EmailSubscription(Constant.EMAIL_ADDRESS))

        # Add an SNS interface endpoint so the Lambda can reach SNS from inside the VPC
        vpc.add_interface_endpoint(
            "SNSEndpoint", service=ec2.InterfaceVpcEndpointAwsService.SNS)

        # step 6. Lambda
        lambdaFn = lambda_.Function(self,
                                    "Singleton",
                                    code=lambda_.Code.asset('lambda'),
                                    handler='hello.handler',
                                    vpc=vpc,
                                    vpc_subnets=ec2.SubnetSelection(
                                        subnet_type=ec2.SubnetType.PRIVATE),
                                    timeout=core.Duration.seconds(300),
                                    runtime=lambda_.Runtime.PYTHON_3_7,
                                    environment={
                                        'SNS_TOPIC_ARN': topic.topic_arn,
                                        'ES_ENDPOINT': es.attr_domain_endpoint,
                                        'ES_INDEX_NAME': Constant.ES_INDEX_NAME
                                    })

        # step 7. Cloud watch event
        rule = events.Rule(
            self,
            "Rule",
            schedule=events.Schedule.cron(minute='0/5',
                                          hour='*',
                                          month='*',
                                          week_day='*',
                                          year='*'),
        )
        rule.add_target(targets.LambdaFunction(lambdaFn))

        # Grant the Lambda permission to publish to the SNS topic
        topic.grant_publish(lambdaFn)

        # Create ALB
        alb_name = "{}-alb".format(Constant.PROJECT_NAME)
        alb = elb.ApplicationLoadBalancer(self,
                                          alb_name,
                                          vpc=vpc,
                                          internet_facing=True,
                                          load_balancer_name=alb_name)
        alb.connections.allow_from_any_ipv4(ec2.Port.tcp(80),
                                            "Internet access ALB 80")
        listener = alb.add_listener("my80", port=80, open=True)

        # Create an Auto Scaling group with a fixed capacity of one EC2 host

        user_data = user_data_content.format(es.attr_domain_endpoint,
                                             Constant.REGION_NAME,
                                             Constant.ES_LOG_PATH,
                                             Constant.ES_INDEX_NAME,
                                             s3_bucket_name)

        # step 3. Create the bastion host

        bastion_name = "{}-bastion".format(Constant.PROJECT_NAME)
        bastion = ec2.BastionHostLinux(
            self,
            bastion_name,
            vpc=vpc,
            subnet_selection=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PUBLIC),
            instance_name=bastion_name,
            instance_type=ec2.InstanceType(
                instance_type_identifier="m4.large"))
        bastion.instance.instance.add_property_override(
            "KeyName", Constant.EC2_KEY_NAME)
        bastion.connections.allow_from_any_ipv4(
            ec2.Port.tcp(22), "Internet access SSH")  # In production, restrict source IPs via allow_from
        bastion.connections.allow_from_any_ipv4(ec2.Port.tcp(8080),
                                                "Internet access HTTP")  # 测试需要
        # bastion.connections.allow_from_any_ipv4(ec2.Port.tcp(443), "Internet access HTTPS")  # 测试需要

        bastion.instance.instance.iam_instance_profile = instance_profile.instance_profile_name  # Attach the instance profile to the EC2 instance (equivalent to assigning the role)
        bastion.instance.instance.image_id = ami_map.get(
            Constant.REGION_NAME)  # Specify the AMI ID

        # The bastion's user_data runs only once (on first boot); to run it on every boot,
        # see the commented sketch below and
        # https://amazonaws-china.com/premiumsupport/knowledge-center/execute-user-data-ec2/?nc1=h_ls
        bastion_user_data = "/home/ec2-user/start.sh {}  {} '{}' {} {}".format(
            es.attr_domain_endpoint, Constant.REGION_NAME,
            Constant.ES_LOG_PATH, Constant.ES_INDEX_NAME, s3_bucket_name)
        bastion.instance.add_user_data(
            "date >> /home/ec2-user/root.txt")  # 查看启动脚本是否执行
        bastion.instance.add_user_data(bastion_user_data)
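
        # A minimal sketch (assumption: not part of this stack) of the approach from the
        # knowledge-center article referenced above: wrap the user data in a MIME multipart
        # document whose cloud-config part sets "scripts-user" to "always", so the shell
        # script part runs on every boot instead of only the first one. The boundary string
        # and script body below are illustrative placeholders.
        #
        #   Content-Type: multipart/mixed; boundary="//"
        #   MIME-Version: 1.0
        #
        #   --//
        #   Content-Type: text/cloud-config; charset="us-ascii"
        #
        #   #cloud-config
        #   cloud_final_modules:
        #   - [scripts-user, always]
        #
        #   --//
        #   Content-Type: text/x-shellscript; charset="us-ascii"
        #
        #   #!/bin/bash
        #   /home/ec2-user/start.sh ...
        #   --//--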

        asg_name = "{}-asg".format(Constant.PROJECT_NAME)
        asg = autoscaling.AutoScalingGroup(
            self,
            asg_name,
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PUBLIC),  # PUBLIC for debug
            instance_type=ec2.InstanceType(
                instance_type_identifier="m4.large"),
            machine_image=my_ami,
            key_name=Constant.EC2_KEY_NAME,
            user_data=ec2.UserData.custom(user_data),
            desired_capacity=1,
            min_capacity=1,
            max_capacity=1,
            role=access_es_role)
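        # The ASG instances assume access_es_role directly, giving them the same S3 and ENI
        # permissions that the bastion receives via its instance profile.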

        asg.connections.allow_from(
            alb, ec2.Port.tcp(8080),
            "ALB access 80 port of EC2 in Autoscaling Group")
        # asg.connections.allow_from_any_ipv4(ec2.Port.tcp(8080), "Internet access HTTP for test")  # for testing
        asg.connections.allow_from_any_ipv4(ec2.Port.tcp(22),
                                            "Internet access SSH")  # for debug
        listener.add_targets("addTargetGroup", port=8080, targets=[asg])

        core.CfnOutput(self,
                       "s3_bucket_name",
                       value=s3_bucket_name,
                       description='S3 bucket:  store web log')

        core.CfnOutput(self,
                       "ElasticSearchEndpointUrl",
                       value=es.attr_domain_endpoint,
                       description='Elastic Search Url')

        # Count the documents in the Elasticsearch index; run this on the bastion to quickly check the log count.
        core.CfnOutput(self,
                       "CmdGetCountIndex",
                       value='curl https://{}/{}/_count'.format(
                           es.attr_domain_endpoint, Constant.ES_INDEX_NAME),
                       description='Count search result. ')

        # SSH login command for the bastion host; it can be copied and used directly.
        core.CfnOutput(self,
                       "CmdSshToBastion",
                       value='ssh -i ~/{}.pem ec2-user@{}'.format(
                           Constant.EC2_KEY_NAME,
                           bastion.instance_public_dns_name),
                       description='cmd ssh to bastion')

        # Command to start services on the bastion; after the bastion reboots, run this command to start the web service and ship logs to ES.
        core.CfnOutput(
            self,
            "CmdSshBastionStartWeb",
            value='sudo {}'.format(bastion_user_data),
            description="Cmd to start web+logstash+filebeat service")

        # ALB access URL
        core.CfnOutput(self,
                       "UrlLoad_Balancer",
                       value='http://{}'.format(alb.load_balancer_dns_name),
                       description='ALB  url ')

        # Bastion web URL; for easier debugging, the bastion uses the same AMI as the ASG instances.
        core.CfnOutput(self,
                       "UrlBastion",
                       value='http://{}:8080'.format(
                           bastion.instance_public_dns_name),
                       description="Bastion server web url ")

        # The command output below opens an SSH tunnel through the bastion to Elasticsearch so Kibana can be accessed locally.
        core.CfnOutput(
            self,
            "CmdSshProxyToKibana",
            value='ssh -i ~/{}.pem ec2-user@{}  -N -L 9200:{}:443'.format(
                Constant.EC2_KEY_NAME, bastion.instance_public_dns_name,
                es.attr_domain_endpoint),
            description="cmd: access kibana from bastion ssh. ")
        # After running the command above, open the following URL in a browser.
        core.CfnOutput(self,
                       "UrlKibana",
                       value='https://localhost:9200/_plugin/kibana/',
                       description="kibana url ")
Beispiel #20
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        alexa_assets = os.path.dirname(
            os.path.realpath(__file__)) + "/../skill"
        asset = assets.Asset(self, 'SkillAsset', path=alexa_assets)

        # role to access bucket
        role = Role(self,
                    'Role',
                    assumed_by=CompositePrincipal(
                        ServicePrincipal('alexa-appkit.amazon.com'),
                        ServicePrincipal('cloudformation.amazonaws.com')))

        # Allow the skill resource to access the zipped skill package
        role.add_to_policy(
            PolicyStatement(
                actions=['s3:GetObject'],
                resources=[
                    f'arn:aws:s3:::{asset.s3_bucket_name}/{asset.s3_object_key}'
                ]))

        # DynamoDB Table
        users_table = dynamo_db.Table(
            self,
            'Users',
            partition_key=dynamo_db.Attribute(
                name='userId', type=dynamo_db.AttributeType.STRING),
            billing_mode=dynamo_db.BillingMode.PAY_PER_REQUEST,
            removal_policy=core.RemovalPolicy.DESTROY)

        # install node dependencies for lambdas
        lambda_folder = os.path.dirname(
            os.path.realpath(__file__)) + "/../lambda_fns"
        subprocess.check_call("npm i".split(),
                              cwd=lambda_folder,
                              stdout=subprocess.DEVNULL)
        subprocess.check_call("npm run build".split(),
                              cwd=lambda_folder,
                              stdout=subprocess.DEVNULL)

        alexa_lambda = _lambda.Function(
            self,
            "AlexaLambdaHandler",
            runtime=_lambda.Runtime.NODEJS_12_X,
            code=_lambda.Code.from_asset("lambda_fns"),
            handler="lambda.handler",
            environment={"USERS_TABLE": users_table.table_name})

        # grant the lambda role read/write permissions to our table
        users_table.grant_read_write_data(alexa_lambda)

        # create the skill
        skill = alexa_ask.CfnSkill(
            self,
            'the-alexa-skill',
            vendor_id='',
            authentication_configuration={
                'clientId': '',
                'clientSecret': '',
                'refreshToken': ''
            },
            skill_package={
                's3Bucket': asset.s3_bucket_name,
                's3Key': asset.s3_object_key,
                's3BucketRole': role.role_arn,
                'overrides': {
                    'manifest': {
                        'apis': {
                            'custom': {
                                'endpoint': {
                                    'uri': alexa_lambda.function_arn
                                }
                            }
                        }
                    }
                }
            })
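        # NOTE: vendor_id and the authentication_configuration values above are left blank
        # and must be filled in with credentials from the Alexa developer console before deploying.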

        ###
        # Allow the Alexa service to invoke the fulfillment Lambda.
        # In order for the Skill to be created, the fulfillment Lambda
        # must have a permission allowing Alexa to invoke it, this causes
        # a circular dependency and requires the first deploy to allow all
        # Alexa skills to invoke the lambda, subsequent deploys will work
        # when specifying the eventSourceToken
        ###
        alexa_lambda.add_permission(
            'AlexaPermission',
            # eventSourceToken: skill.ref,
            principal=ServicePrincipal('alexa-appkit.amazon.com'),
            action='lambda:InvokeFunction')
Beispiel #21
0
 def add_role(self, name: str) -> Role:
     return Role(
         self,
         f"Role-{name}",
         assumed_by=ServicePrincipal("ecs-tasks.amazonaws.com"),
     )
Beispiel #22
0
    def __init__(
            self,
            scope: Stack,
            id: str,
            on_create_action: Dict[str, Any],
            on_update_action: Dict[str, Any],
            on_delete_action: Dict[str, Any],
    ) -> None:
        """
        Constructor.

        :param scope: CloudFormation stack in which resources should be placed.
        :param id: Name (id) or prefix for resources.
        :param on_create_action: Create action arguments. Read more on:
        https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.create_service
        :param on_update_action: Update action arguments. Read more on:
        https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.update_service
        :param on_delete_action: Delete action arguments. Read more on:
        https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.delete_service
        """
        self.__role = Role(
            scope=scope,
            id=id + 'Role',
            role_name=id + 'Role',
            assumed_by=CompositePrincipal(
                ServicePrincipal("lambda.amazonaws.com"),
                ServicePrincipal("cloudformation.amazonaws.com")
            ),
            inline_policies={
                id + 'Policy': PolicyDocument(
                    statements=[
                        PolicyStatement(
                            actions=[
                                'ecs:createService',
                                'ecs:updateService',
                                'ecs:deleteService',
                                'ecs:describeServices',
                                'ecs:listServices',
                                'ecs:updateServicePrimaryTaskSet'
                            ],
                            effect=Effect.ALLOW,
                            resources=['*']
                        ),
                        PolicyStatement(
                            actions=[
                                "logs:CreateLogGroup",
                                "logs:CreateLogStream",
                                "logs:PutLogEvents"
                            ],
                            effect=Effect.ALLOW,
                            resources=['*']
                        ),
                    ]
                )
            },
            managed_policies=[]
        )

        self.__custom_backend = Function(
            scope=scope,
            id=id + 'Backend',
            code=Code.from_asset(
                path=package_root
            ),
            handler='index.handler',
            runtime=Runtime.PYTHON_3_6,
            description=f'A custom resource backend to manage ecs {id} service.',
            function_name=id + 'Backend',
            memory_size=128,
            role=self.__role,
            timeout=Duration.seconds(900),
        )

        # noinspection PyTypeChecker
        provider: ICustomResourceProvider = CustomResourceProvider.from_lambda(self.__custom_backend)

        self.__custom_resource = CustomResource(
            scope=scope,
            id=id + 'CustomResource',
            provider=provider,
            removal_policy=RemovalPolicy.DESTROY,
            properties={
                'onCreate': on_create_action,
                'onUpdate': on_update_action,
                'onDelete': on_delete_action
            },
            resource_type='Custom::EcsService'
        )

        # Make sure that custom resource is deleted before lambda function backend.
        self.__custom_resource.node.add_dependency(self.__custom_backend)
Beispiel #23
0
 def __init__(self, scope: Construct, _id: str, **kwargs) -> None:
     super().__init__(scope, _id, **kwargs)
     bucket = Bucket(self, 'Bucket')
     LambdaRestApi(
         self,
         'Api',
         default_cors_preflight_options=CorsOptions(
             allow_origins=['*']
         ),
         handler=DockerImageFunction(
             self,
             'Function',
             code=DockerImageCode.from_image_asset(directory=os.getcwd(), file='Dockerfile', exclude=['cdk.out']),
             environment={
                 'S3_BUCKET': bucket.bucket_name,
                 'SPOTIFY_USER_ID': environ.get('SPOTIFY_USER_ID'),
                 'SPOTIFY_CLIENT_ID': environ.get('SPOTIFY_CLIENT_ID'),
                 'SPOTIFY_CLIENT_SECRET': environ.get('SPOTIFY_CLIENT_SECRET'),
                 'SPOTIFY_REDIRECT_URI': environ.get('SPOTIFY_REDIRECT_URI'),
                 'SPOTIFY_REFRESH_TOKEN': environ.get('SPOTIFY_REFRESH_TOKEN')
             },
             role=Role(
                 self,
                 'LambdaRole',
                 assumed_by=cast(IPrincipal, ServicePrincipal('lambda.amazonaws.com')),
                 inline_policies={
                     's3': PolicyDocument(
                         statements=[
                             PolicyStatement(
                                 effect=Effect.ALLOW,
                                 actions=[
                                     's3:ListBucket',
                                     's3:PutObject',
                                     's3:GetObject',
                                     's3:DeleteObject',
                                     's3:ListObjects'
                                 ],
                                 resources=[
                                     'arn:aws:s3:::*'
                                 ]
                             )
                         ]
                     ),
                     'lambda': PolicyDocument(
                         statements=[
                             PolicyStatement(
                                 effect=Effect.ALLOW,
                                 actions=['lambda:InvokeFunction'],
                                 resources=['arn:aws:lambda:*:*:function:*']
                             )
                         ]
                     ),
                     'logs': PolicyDocument(
                         statements=[
                             PolicyStatement(
                                 effect=Effect.ALLOW,
                                 actions=[
                                     'logs:CreateLogGroup',
                                     'logs:CreateLogStream',
                                     'logs:PutLogEvents'
                                 ],
                                 resources=['*']
                             )
                         ]
                     )
                 }
             )
         )
     )
Beispiel #24
0
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # The code that defines your stack goes here
        table = dynamodb.Table(
            self,
            "TheTable",
            table_name="cdk-table",
            partition_key=dynamodb.Attribute(
                name="id", type=dynamodb.AttributeType.STRING),
            removal_policy=cdk.RemovalPolicy.DESTROY,
        )

        # compute_environment = batch.ComputeEnvironment(
        #     self,
        #     "MyComputeEnvironment",
        #     compute_environment_name="cdk-env",
        #     compute_resources=batch.ComputeResources(
        #         vpc=Vpc.from_lookup(self, "VPC", is_default=True),
        #     ),
        #     enabled=True,
        #     managed=True,
        # )

        job_role = Role(
            self,
            "BatchJobRole",
            assumed_by=ServicePrincipal("ecs-tasks.amazonaws.com"),
            description="Role for a container in a Batch job",
            role_name="CDK-BatchJobRole",
            managed_policies=[
                ManagedPolicy.from_aws_managed_policy_name(
                    managed_policy_name="AmazonDynamoDBFullAccess"),
            ],
        )

        repository = Repository(
            self,
            "MyRepository",
            removal_policy=cdk.RemovalPolicy.DESTROY,
            repository_name="cdk-my-repository",
            lifecycle_rules=[
                LifecycleRule(max_image_count=5, description="Max 5 images")
            ],
        )

        image: ContainerImage = ContainerImage.from_ecr_repository(
            repository=repository,
            tag="latest",
        )

        container = batch.JobDefinitionContainer(
            image=image,
            job_role=job_role,
            command=["python", "run.py", "--help"],
            environment={
                "READINGS_TABLE": table.table_name,
                "AWS_REGION": self.region,
            },
            vcpus=1,
            log_configuration=batch.LogConfiguration(
                log_driver=batch.LogDriver.AWSLOGS),
            memory_limit_mib=2048,
        )

        batch.JobDefinition(
            self,
            "JobDefinitionCreate",
            container=container,
            job_definition_name="create",
            retry_attempts=1,
        )
Beispiel #25
0
    def __init__(self,
                 scope: Stack,
                 id: str,
                 capacity: Optional[AddCapacityOptions] = None,
                 cluster_name: Optional[str] = None,
                 container_insights: Optional[bool] = None,
                 default_cloud_map_namespace: Optional[
                     CloudMapNamespaceOptions] = None,
                 vpc: Optional[IVpc] = None,
                 **kwargs) -> None:
        known_args = dict(
            scope=scope,
            id=id,
            capacity=capacity,
            cluster_name=cluster_name,
            container_insights=container_insights,
            default_cloud_map_namespace=default_cloud_map_namespace,
            vpc=vpc)

        unknown_args = kwargs

        super().__init__(**{**known_args, **unknown_args})

        self.__role = Role(
            scope=scope,
            id=cluster_name + 'CustomResourceRole',
            role_name=cluster_name + 'CustomResourceRole',
            assumed_by=CompositePrincipal(
                ServicePrincipal("lambda.amazonaws.com"),
                ServicePrincipal("cloudformation.amazonaws.com")),
            inline_policies={
                cluster_name + 'CustomResourcePolicy':
                PolicyDocument(statements=[
                    PolicyStatement(actions=[
                        "ecs:ListClusters",
                        "ecs:ListContainerInstances",
                        "ecs:ListServices",
                        "ecs:ListTaskDefinitions",
                        "ecs:ListTasks",
                        "ecs:DescribeClusters",
                        "ecs:DescribeContainerInstances",
                        "ecs:DescribeServices",
                        "ecs:DescribeTaskDefinition",
                        "ecs:DescribeTasks",
                        "ecs:CreateCluster",
                        "ecs:DeleteCluster",
                        "ecs:DeleteService",
                        "ecs:DeregisterContainerInstance",
                        "ecs:DeregisterTaskDefinition",
                        "ecs:StopTask",
                        "ecs:UpdateService",
                    ],
                                    effect=Effect.ALLOW,
                                    resources=['*']),
                    PolicyStatement(actions=[
                        "logs:CreateLogGroup", "logs:CreateLogStream",
                        "logs:PutLogEvents"
                    ],
                                    effect=Effect.ALLOW,
                                    resources=['*']),
                ])
            },
            managed_policies=[])

        self.__custom_backend = Function(
            scope=scope,
            id=cluster_name + 'Deleter',
            code=Code.from_asset(path=package_root),
            handler='index.handler',
            runtime=Runtime.PYTHON_3_6,
            description=
            f'A custom resource backend to delete ecs cluster ({cluster_name}) in the right way.',
            function_name=cluster_name + 'Deleter',
            memory_size=128,
            role=self.__role,
            timeout=Duration.seconds(900),
        )

        # noinspection PyTypeChecker
        provider: ICustomResourceProvider = CustomResourceProvider.from_lambda(
            self.__custom_backend)

        self.__custom_resource = CustomResource(
            scope=scope,
            id=cluster_name + 'CustomResource',
            provider=provider,
            removal_policy=RemovalPolicy.DESTROY,
            properties={'clusterName': cluster_name},
            resource_type='Custom::EmptyS3Bucket')

        # Make sure that custom resource is deleted before lambda function backend.
        self.__custom_resource.node.add_dependency(self.__custom_backend)
        # Make sure that custom resource is deleted before the bucket.
        self.__custom_resource.node.add_dependency(self)
    def __init__(self, scope, id, **kwargs):

        super().__init__(scope, id, **kwargs)

        # Each publisher instance authenticates as an individual user stored in this pool
        user_pool = UserPool(self,
                             'user-pool',
                             user_pool_name='amazon-cloudwatch-publisher')

        # Set up the client app with simple username/password authentication
        user_pool_client = UserPoolClient(
            self,
            'user-pool-client',
            user_pool=user_pool,
            enabled_auth_flows=[AuthFlow.USER_PASSWORD],
            user_pool_client_name='amazon-cloudwatch-publisher')

        # The identity pool exists to associate users to roles they can assume
        identity_pool = CfnIdentityPool(
            self,
            'identity-pool',
            identity_pool_name='amazon-cloudwatch-publisher',
            allow_unauthenticated_identities=False)

        # Setting this property links the identity pool to the user pool client app
        identity_pool.add_property_override(
            property_path='CognitoIdentityProviders',
            value=[{
                'ClientId':
                user_pool_client.user_pool_client_id,
                'ProviderName':
                'cognito-idp.{0}.amazonaws.com/{1}'.format(
                    self.region, user_pool.user_pool_id)
            }])

        # Only identities that come from Cognito users should be able to assume the publisher role
        principal = FederatedPrincipal(
            federated='cognito-identity.amazonaws.com',
            assume_role_action='sts:AssumeRoleWithWebIdentity',
            conditions={
                'StringEquals': {
                    'cognito-identity.amazonaws.com:aud': identity_pool.ref
                },
                'ForAnyValue:StringLike': {
                    'cognito-identity.amazonaws.com:amr': 'authenticated'
                }
            })

        # Minimum set of permissions required to push metrics and logs
        policy = PolicyDocument(statements=[
            PolicyStatement(effect=Effect.ALLOW,
                            actions=[
                                'cloudwatch:PutMetricData',
                                'logs:CreateLogGroup',
                                'logs:CreateLogStream',
                                'logs:DescribeLogGroups',
                                'logs:DescribeLogStreams',
                                'logs:PutLogEvents',
                            ],
                            resources=[
                                '*',
                            ])
        ])

        # Create the role itself using the principal and policy defined above
        role = Role(self,
                    'role',
                    assumed_by=principal,
                    # inline_policies takes a mapping of policy name to PolicyDocument;
                    # 'publisher-policy' is an arbitrary name for the inline policy
                    inline_policies={'publisher-policy': policy})

        # Associate the above role with the identity pool; we don't want
        # any unauthenticated access so explicitly ensure it's set to None
        CfnIdentityPoolRoleAttachment(
            self,
            'identity-pool-role-attachment-authenticated',
            identity_pool_id=identity_pool.ref,
            roles={
                'authenticated': role.role_arn,
                'unauthenticated': None
            })

        # Defining outputs here allows them to be scraped from the `cdk deploy` command
        CfnOutput(self, 'Region', value=self.region)
        CfnOutput(self, 'AccountId', value=self.account)
        CfnOutput(self, 'UserPoolId', value=user_pool.user_pool_id)
        CfnOutput(self, 'IdentityPoolId', value=identity_pool.ref)
        CfnOutput(self,
                  'AppClientId',
                  value=user_pool_client.user_pool_client_id)
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # SQS queue
        state_change_sqs = Queue(
            self,
            "state_change_sqs",
            visibility_timeout=core.Duration.seconds(60)
        )

        # Dynamodb Tables
        # EC2 state changes
        tb_states = Table(
            self, "ec2_states", partition_key=Attribute(name="instance-id",
                type=AttributeType.STRING),
            sort_key=Attribute(
                name="time",
                type=AttributeType.STRING
            ),
            billing_mode=BillingMode.PAY_PER_REQUEST,
            removal_policy=core.RemovalPolicy.DESTROY,
            stream=StreamViewType.NEW_IMAGE)

        # EC2 inventory
        tb_inventory = Table(
            self, "ec2_inventory", partition_key=Attribute(name="instance-id",
                type=AttributeType.STRING),
            sort_key=Attribute(
                name="time",
                type=AttributeType.STRING
            ),
            billing_mode=BillingMode.PAY_PER_REQUEST,
            removal_policy=core.RemovalPolicy.DESTROY,
            stream=StreamViewType.KEYS_ONLY)
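        # tb_states streams full NEW_IMAGE records that feed the processor Lambda below;
        # tb_inventory streams KEYS_ONLY since nothing consumes its stream in this stack.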

        # IAM policies - AWS managed and customer managed
        basic_exec = ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole")
        sqs_access = ManagedPolicy(self, "LambdaSQSExecution",
            statements=[
                PolicyStatement(
                    effect=Effect.ALLOW,
                    actions=[
                        "sqs:ReceiveMessage",
                        "sqs:DeleteMessage",
                        "sqs:GetQueueAttributes"
                    ],
                    resources=[state_change_sqs.queue_arn]
                )])

        # IAM Policies
        pol_ec2_states_ro = ManagedPolicy(self, "pol_EC2StatesReadOnly",
            statements=[
                PolicyStatement(
                    effect=Effect.ALLOW,
                    actions=[
                        "dynamodb:DescribeStream",
                        "dynamodb:GetRecords",
                        "dynamodb:GetItem",
                        "dynamodb:GetShardIterator",
                        "dynamodb:ListStreams"
                    ],
                    resources=[tb_states.table_arn]
                )])

        pol_ec2_states_rwd = ManagedPolicy(
            self, "pol_EC2StatesWriteDelete",
            statements=[
                PolicyStatement(
                    effect=Effect.ALLOW,
                    actions=[
                        "dynamodb:DeleteItem",
                        "dynamodb:DescribeTable",
                        "dynamodb:PutItem",
                        "dynamodb:Query",
                        "dynamodb:UpdateItem"
                    ],
                    resources=[tb_states.table_arn]
                )])

        pol_ec2_inventory_full = ManagedPolicy(
            self, "pol_EC2InventoryFullAccess",
            statements=[
                PolicyStatement(
                    effect=Effect.ALLOW,
                    actions=[
                        "dynamodb:DeleteItem",
                        "dynamodb:DescribeTable",
                        "dynamodb:GetItem",
                        "dynamodb:PutItem",
                        "dynamodb:Query",
                        "dynamodb:UpdateItem"
                    ],
                    resources=[tb_inventory.table_arn]
                )])
        
        pol_lambda_describe_ec2 = ManagedPolicy(
            self, "pol_LambdaDescribeEC2",
            statements=[
                PolicyStatement(
                    effect=Effect.ALLOW,
                    actions=[
                        "ec2:Describe*"
                    ],
                    resources=["*"]
                )])

        # IAM Roles
        rl_event_capture = Role(
            self,
            'rl_state_capture',
            assumed_by=ServicePrincipal('lambda.amazonaws.com'),
            managed_policies=[basic_exec, sqs_access, pol_ec2_states_rwd]
            )

        rl_event_processor = Role(
            self,
            'rl_state_processor',
            assumed_by=ServicePrincipal('lambda.amazonaws.com'),
            managed_policies=[
                basic_exec,
                pol_ec2_states_ro,
                pol_ec2_states_rwd,
                pol_ec2_inventory_full,
                pol_lambda_describe_ec2])

        # event capture lambda
        lambda_event_capture = Function(
            self, "lambda_event_capture",
            handler="event_capture.handler",
            runtime=Runtime.PYTHON_3_7,
            code=Code.asset('event_capture'),
            role=rl_event_capture,
            events=[SqsEventSource(state_change_sqs)],
            environment={"state_table": tb_states.table_name}
        )

        # event processor lambda
        lambda_event_processor = Function(
            self, "lambda_event_processor",
            handler="event_processor.handler",
            runtime=Runtime.PYTHON_3_7,
            code=Code.asset('event_processor'),
            role=rl_event_processor,
            events=[
                DynamoEventSource(
                    tb_states,
                    starting_position=StartingPosition.LATEST)
            ],
            environment={
                "inventory_table": tb_inventory.table_name,
                }
        )

        # Cloudwatch Event
        event_ec2_change = Rule(
            self, "ec2_state_change",
            description="trigger on ec2 start, stop and terminate instances",
            event_pattern=EventPattern(
                source=["aws.ec2"],
                detail_type=["EC2 Instance State-change Notification"],
                detail={
                    "state": [
                        "running",
                        "stopped",
                        "terminated"]
                    }
                ),
            targets=[aws_events_targets.SqsQueue(state_change_sqs)]
        )
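        # EventBridge delivers matching EC2 state-change events to the SQS queue,
        # which in turn triggers lambda_event_capture via its SqsEventSource.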

        # Outputs
        core.CfnOutput(self, "rl_state_capture_arn", value=rl_event_capture.role_arn)
        core.CfnOutput(self, "rl_state_processor_arn", value=rl_event_processor.role_arn)
        core.CfnOutput(self, "tb_states_arn", value=tb_states.table_arn)
        core.CfnOutput(self, "tb_inventory_arn", value=tb_inventory.table_arn)
        core.CfnOutput(self, "sqs_state_change", value=state_change_sqs.queue_arn)
Beispiel #28
0
    def __init__(
        self,
        scope: Construct,
        id: str,
        elasticsearch_index: ElasticsearchIndexResource,
        dynamodb_table: Table,
        kms_key: Optional[Key] = None,
    ) -> None:
        super().__init__(scope=scope, id=id)

        elasticsearch_layer = BElasticsearchLayer(
            scope=self, name=f"{id}ElasticsearchLayer")

        initial_cloner_function = SingletonFunction(
            scope=self,
            id="InitialClonerFunction",
            uuid="e01116a4-f939-43f2-8f5b-cc9f862c9e01",
            lambda_purpose="InitialClonerSingletonLambda",
            code=Code.from_asset(initial_cloner_root),
            handler="index.handler",
            runtime=Runtime.PYTHON_3_8,
            layers=[elasticsearch_layer],
            log_retention=RetentionDays.ONE_MONTH,
            memory_size=128,
            timeout=Duration.minutes(15),
            role=Role(
                scope=self,
                id="InitialClonerFunctionRole",
                assumed_by=ServicePrincipal("lambda.amazonaws.com"),
                inline_policies={
                    "LogsPolicy":
                    PolicyDocument(statements=[
                        PolicyStatement(
                            actions=[
                                "logs:CreateLogGroup",
                                "logs:CreateLogStream",
                                "logs:PutLogEvents",
                                "logs:DescribeLogStreams",
                            ],
                            resources=["arn:aws:logs:*:*:*"],
                            effect=Effect.ALLOW,
                        )
                    ]),
                    "ElasticsearchPolicy":
                    PolicyDocument(statements=[
                        PolicyStatement(
                            actions=[
                                "es:ESHttpDelete",
                                "es:ESHttpGet",
                                "es:ESHttpHead",
                                "es:ESHttpPatch",
                                "es:ESHttpPost",
                                "es:ESHttpPut",
                            ],
                            resources=["*"],
                            effect=Effect.ALLOW,
                        )
                    ]),
                    "DynamodbPolicy":
                    PolicyDocument(statements=[
                        PolicyStatement(
                            actions=["dynamodb:*"],
                            resources=["*"],
                            effect=Effect.ALLOW,
                        )
                    ]),
                },
                description="Role for DynamoDB Initial Cloner Function",
            ),
        )

        if kms_key:
            initial_cloner_function.add_to_role_policy(
                PolicyStatement(
                    actions=["kms:Decrypt"],
                    resources=[kms_key.key_arn],
                    effect=Effect.ALLOW,
                ), )

        initial_cloner = CustomResource(
            scope=self,
            id="InitialCloner",
            service_token=initial_cloner_function.function_arn,
            removal_policy=RemovalPolicy.DESTROY,
            properties={
                "DynamodbTableName":
                dynamodb_table.table_name,
                "ElasticsearchIndexName":
                elasticsearch_index.index_name,
                "ElasticsearchEndpoint":
                elasticsearch_index.elasticsearch_domain.domain_endpoint,
            },
            resource_type="Custom::ElasticsearchInitialCloner",
        )

        primary_key_field = initial_cloner.get_att_string("PrimaryKeyField")

        dynamodb_stream_arn = dynamodb_table.table_stream_arn
        if not dynamodb_stream_arn:
            raise Exception("DynamoDB streams must be enabled for the table")

        dynamodb_event_source = DynamoEventSource(
            table=dynamodb_table,
            starting_position=StartingPosition.LATEST,
            enabled=True,
            max_batching_window=Duration.seconds(10),
            bisect_batch_on_error=True,
            parallelization_factor=2,
            batch_size=1000,
            retry_attempts=10,
        )
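        # Stream records are read in batches of up to 1000 with a 10-second batching window;
        # bisect_batch_on_error splits a failing batch to isolate bad records, with 10 retries.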

        cloner_function = Function(
            scope=self,
            id="ClonerFunction",
            code=Code.from_asset(cloner_root),
            handler="index.handler",
            runtime=Runtime.PYTHON_3_8,
            environment={
                "ES_INDEX_NAME": elasticsearch_index.index_name,
                "ES_DOMAIN_ENDPOINT":
                elasticsearch_index.elasticsearch_domain.domain_endpoint,
                "PRIMARY_KEY_FIELD": primary_key_field,
            },
            events=[dynamodb_event_source],
            layers=[elasticsearch_layer],
            log_retention=RetentionDays.ONE_MONTH,
            memory_size=128,
            role=Role(
                scope=self,
                id="ClonerFunctionRole",
                assumed_by=ServicePrincipal("lambda.amazonaws.com"),
                inline_policies={
                    "LogsPolicy":
                    PolicyDocument(statements=[
                        PolicyStatement(
                            actions=[
                                "logs:CreateLogGroup",
                                "logs:CreateLogStream",
                                "logs:PutLogEvents",
                                "logs:DescribeLogStreams",
                            ],
                            resources=["arn:aws:logs:*:*:*"],
                            effect=Effect.ALLOW,
                        )
                    ]),
                    "ElasticsearchPolicy":
                    PolicyDocument(statements=[
                        PolicyStatement(
                            actions=[
                                "es:ESHttpDelete",
                                "es:ESHttpGet",
                                "es:ESHttpHead",
                                "es:ESHttpPatch",
                                "es:ESHttpPost",
                                "es:ESHttpPut",
                            ],
                            resources=[
                                f"{elasticsearch_index.elasticsearch_domain.domain_arn}/*"
                            ],
                            effect=Effect.ALLOW,
                        )
                    ]),
                    "DynamodbStreamsPolicy":
                    PolicyDocument(statements=[
                        PolicyStatement(
                            actions=[
                                "dynamodb:DescribeStream",
                                "dynamodb:GetRecords",
                                "dynamodb:GetShardIterator",
                                "dynamodb:ListStreams",
                            ],
                            resources=[dynamodb_stream_arn],
                            effect=Effect.ALLOW,
                        )
                    ]),
                },
                description="Role for DynamoDB Cloner Function",
            ),
            timeout=Duration.seconds(30),
        )

        if kms_key:
            cloner_function.add_to_role_policy(
                PolicyStatement(
                    actions=["kms:Decrypt"],
                    resources=[kms_key.key_arn],
                    effect=Effect.ALLOW,
                ), )
    def __init__(self, scope: core.Construct, id: str, application_prefix: str,
                 suffix: str, kda_role: Role, **kwargs):
        super().__init__(scope, id, **kwargs)

        stack = Stack.of(self)
        region = stack.region

        # Create Cognito User Pool
        self.__user_pool = CfnUserPool(
            scope=self,
            id='UserPool',
            admin_create_user_config={'allowAdminCreateUserOnly': True},
            policies={'passwordPolicy': {
                'minimumLength': 8
            }},
            username_attributes=['email'],
            auto_verified_attributes=['email'],
            user_pool_name=application_prefix + '_user_pool')

        # Create a Cognito User Pool Domain using the newly created Cognito User Pool
        CfnUserPoolDomain(scope=self,
                          id='CognitoDomain',
                          domain=application_prefix + '-' + suffix,
                          user_pool_id=self.user_pool.ref)

        # Create Cognito Identity Pool
        self.__id_pool = CfnIdentityPool(
            scope=self,
            id='IdentityPool',
            allow_unauthenticated_identities=False,
            cognito_identity_providers=[],
            identity_pool_name=application_prefix + '_identity_pool')

        trust_relationship = FederatedPrincipal(
            federated='cognito-identity.amazonaws.com',
            conditions={
                'StringEquals': {
                    'cognito-identity.amazonaws.com:aud': self.id_pool.ref
                },
                'ForAnyValue:StringLike': {
                    'cognito-identity.amazonaws.com:amr': 'authenticated'
                }
            },
            assume_role_action='sts:AssumeRoleWithWebIdentity')
        # IAM role for master user
        master_auth_role = Role(scope=self,
                                id='MasterAuthRole',
                                assumed_by=trust_relationship)
        # Role for authenticated user
        limited_auth_role = Role(scope=self,
                                 id='LimitedAuthRole',
                                 assumed_by=trust_relationship)
        # Attach Role to Identity Pool
        CfnIdentityPoolRoleAttachment(
            scope=self,
            id='userPoolRoleAttachment',
            identity_pool_id=self.id_pool.ref,
            roles={'authenticated': limited_auth_role.role_arn})
        # Create master-user-group
        CfnUserPoolGroup(scope=self,
                         id='AdminsGroup',
                         user_pool_id=self.user_pool.ref,
                         group_name='master-user-group',
                         role_arn=master_auth_role.role_arn)
        # Create limited-user-group
        CfnUserPoolGroup(scope=self,
                         id='UsersGroup',
                         user_pool_id=self.user_pool.ref,
                         group_name='limited-user-group',
                         role_arn=limited_auth_role.role_arn)
        # Role for the Elasticsearch service to access Cognito
        es_role = Role(scope=self,
                       id='EsRole',
                       assumed_by=ServicePrincipal(service='es.amazonaws.com'),
                       managed_policies=[
                           ManagedPolicy.from_aws_managed_policy_name(
                               'AmazonESCognitoAccess')
                       ])

        # Use the following command line to generate the python dependencies layer content
        # pip3 install -t lambda-layer/python/lib/python3.8/site-packages -r lambda/requirements.txt
        # Build the lambda layer assets
        subprocess.call([
            'pip', 'install', '-t',
            'streaming/streaming_cdk/lambda-layer/python/lib/python3.8/site-packages',
            '-r', 'streaming/streaming_cdk/bootstrap-lambda/requirements.txt',
            '--upgrade'
        ])

        requirements_layer = _lambda.LayerVersion(
            scope=self,
            id='PythonRequirementsTemplate',
            code=_lambda.Code.from_asset(
                'streaming/streaming_cdk/lambda-layer'),
            compatible_runtimes=[_lambda.Runtime.PYTHON_3_8])

        # This lambda function will bootstrap the Elasticsearch cluster
        bootstrap_function_name = 'AESBootstrap'
        register_template_lambda = _lambda.Function(
            scope=self,
            id='RegisterTemplate',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset(
                'streaming/streaming_cdk/bootstrap-lambda'),
            handler='es-bootstrap.lambda_handler',
            environment={
                'REGION': region,
                'KDA_ROLE_ARN': kda_role.role_arn,
                'MASTER_ROLE_ARN': master_auth_role.role_arn
            },
            layers=[requirements_layer],
            timeout=Duration.minutes(15),
            function_name=bootstrap_function_name)

        lambda_role = register_template_lambda.role
        lambda_role.add_to_policy(
            PolicyStatement(
                actions=['logs:CreateLogGroup'],
                resources=[stack.format_arn(service='logs', resource='*')]))
        lambda_role.add_to_policy(
            PolicyStatement(
                actions=['logs:CreateLogStream', 'logs:PutLogEvents'],
                resources=[
                    stack.format_arn(service='logs',
                                     resource='log_group',
                                     resource_name='/aws/lambda/' +
                                     bootstrap_function_name + ':*')
                ]))

        # Let the lambda assume the master role so that actions can be executed on the cluster
        # https://aws.amazon.com/premiumsupport/knowledge-center/lambda-function-assume-iam-role/
        lambda_role.add_to_policy(
            PolicyStatement(actions=['sts:AssumeRole'],
                            resources=[master_auth_role.role_arn]))

        master_auth_role.assume_role_policy.add_statements(
            PolicyStatement(actions=['sts:AssumeRole'],
                            principals=[lambda_role]))

        # List all the roles that are allowed to access the Elasticsearch cluster.
        roles = [
            ArnPrincipal(limited_auth_role.role_arn),
            ArnPrincipal(master_auth_role.role_arn),
            ArnPrincipal(kda_role.role_arn)
        ]  # The users
        if register_template_lambda and register_template_lambda.role:
            roles.append(ArnPrincipal(
                lambda_role.role_arn))  # The lambda used to bootstrap
        # Create kms key
        kms_key = Key(scope=self,
                      id='kms-es',
                      alias='custom/es',
                      description='KMS key for Elasticsearch domain',
                      enable_key_rotation=True)

        # AES Log Groups
        es_app_log_group = logs.LogGroup(scope=self,
                                         id='EsAppLogGroup',
                                         retention=logs.RetentionDays.ONE_WEEK,
                                         removal_policy=RemovalPolicy.RETAIN)

        # Create the Elasticsearch domain
        es_domain_arn = stack.format_arn(service='es',
                                         resource='domain',
                                         resource_name=application_prefix +
                                         '/*')

        es_access_policy = PolicyDocument(statements=[
            PolicyStatement(principals=roles,
                            actions=[
                                'es:ESHttpGet', 'es:ESHttpPut',
                                'es:ESHttpPost', 'es:ESHttpDelete'
                            ],
                            resources=[es_domain_arn])
        ])
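        # Domain resource policy: grants the Cognito auth roles, the KDA role and the
        # bootstrap Lambda role HTTP access to the Elasticsearch domain.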
        self.__es_domain = es.CfnDomain(
            scope=self,
            id='searchDomain',
            elasticsearch_cluster_config={
                'instanceType': 'r5.large.elasticsearch',
                'instanceCount': 2,
                'dedicatedMasterEnabled': True,
                'dedicatedMasterCount': 3,
                'dedicatedMasterType': 'r5.large.elasticsearch',
                'zoneAwarenessEnabled': True,
                'zoneAwarenessConfig': {
                    'AvailabilityZoneCount': '2'
                },
            },
            encryption_at_rest_options={
                'enabled': True,
                'kmsKeyId': kms_key.key_id
            },
            node_to_node_encryption_options={'enabled': True},
            ebs_options={
                'volumeSize': 10,
                'ebsEnabled': True
            },
            elasticsearch_version='7.9',
            domain_name=application_prefix,
            access_policies=es_access_policy,
            cognito_options={
                'enabled': True,
                'identityPoolId': self.id_pool.ref,
                'roleArn': es_role.role_arn,
                'userPoolId': self.user_pool.ref
            },
            advanced_security_options={
                'enabled': True,
                'internalUserDatabaseEnabled': False,
                'masterUserOptions': {
                    'masterUserArn': master_auth_role.role_arn
                }
            },
            domain_endpoint_options={
                'enforceHttps': True,
                'tlsSecurityPolicy': 'Policy-Min-TLS-1-2-2019-07'
            },
            # log_publishing_options={
            #     # 'ES_APPLICATION_LOGS': {
            #     #     'enabled': True,
            #     #     'cloud_watch_logs_log_group_arn': es_app_log_group.log_group_arn
            #     # },
            #     # 'AUDIT_LOGS': {
            #     #     'enabled': True,
            #     #     'cloud_watch_logs_log_group_arn': ''
            #     # },
            #     # 'SEARCH_SLOW_LOGS': {
            #     #     'enabled': True,
            #     #     'cloud_watch_logs_log_group_arn': ''
            #     # },
            #     # 'INDEX_SLOW_LOGS': {
            #     #     'enabled': True,
            #     #     'cloud_watch_logs_log_group_arn': ''
            #     # }
            # }
        )

        # Not yet on the roadmap...
        # See https://github.com/aws-cloudformation/aws-cloudformation-coverage-roadmap/issues/283
        # self.es_domain.add_property_override('ElasticsearchClusterConfig.WarmEnabled', True)
        # self.es_domain.add_property_override('ElasticsearchClusterConfig.WarmCount', 2)
        # self.es_domain.add_property_override('ElasticsearchClusterConfig.WarmType', 'ultrawarm1.large.elasticsearch')

        # Deny all roles from the authentication provider - users must be added to groups explicitly.
        # This lambda function fixes the Cognito identity pool role mappings accordingly.
        cognito_function_name = 'CognitoFix'
        cognito_template_lambda = _lambda.Function(
            scope=self,
            id='CognitoFixLambda',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset(
                'streaming/streaming_cdk/cognito-lambda'),
            handler='handler.handler',
            environment={
                'REGION': scope.region,
                'USER_POOL_ID': self.__user_pool.ref,
                'IDENTITY_POOL_ID': self.__id_pool.ref,
                'LIMITED_ROLE_ARN': limited_auth_role.role_arn
            },
            timeout=Duration.minutes(15),
            function_name=cognito_function_name)

        lambda_role = cognito_template_lambda.role
        lambda_role.add_to_policy(
            PolicyStatement(
                actions=['logs:CreateLogGroup'],
                resources=[stack.format_arn(service='logs', resource='*')]))
        lambda_role.add_to_policy(
            PolicyStatement(
                actions=['logs:CreateLogStream', 'logs:PutLogEvents'],
                resources=[
                    stack.format_arn(service='logs',
                                     resource='log_group',
                                     resource_name='/aws/lambda/' +
                                     cognito_function_name + ':*')
                ]))
        lambda_role.add_to_policy(
            PolicyStatement(actions=['cognito-idp:ListUserPoolClients'],
                            resources=[self.user_pool.attr_arn]))
        lambda_role.add_to_policy(
            PolicyStatement(actions=['iam:PassRole'],
                            resources=[limited_auth_role.role_arn]))

        cognito_id_res = Fn.join(':', [
            'arn:aws:cognito-identity', scope.region, scope.account,
            Fn.join('/', ['identitypool', self.__id_pool.ref])
        ])
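        # cognito_id_res is the identity pool ARN
        # (arn:aws:cognito-identity:<region>:<account>:identitypool/<id>),
        # used to scope the SetIdentityPoolRoles permission below.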

        lambda_role.add_to_policy(
            PolicyStatement(actions=['cognito-identity:SetIdentityPoolRoles'],
                            resources=[cognito_id_res]))

        # Get the Domain Endpoint and register it with the lambda as environment variable.
        register_template_lambda.add_environment(
            'DOMAIN', self.__es_domain.attr_domain_endpoint)

        CfnOutput(scope=self,
                  id='createUserUrl',
                  description="Create a new user in the user pool here.",
                  value="https://" + scope.region +
                  ".console.aws.amazon.com/cognito/users?region=" +
                  scope.region + "#/pool/" + self.user_pool.ref + "/users")
        CfnOutput(scope=self,
                  id='kibanaUrl',
                  description="Access Kibana via this URL.",
                  value="https://" + self.__es_domain.attr_domain_endpoint +
                  "/_plugin/kibana/")

        bootstrap_lambda_provider = Provider(
            scope=self,
            id='BootstrapLambdaProvider',
            on_event_handler=register_template_lambda)
        CustomResource(scope=self,
                       id='ExecuteRegisterTemplate',
                       service_token=bootstrap_lambda_provider.service_token,
                       properties={'Timeout': 900})

        cognito_lambda_provider = Provider(
            scope=self,
            id='CognitoFixLambdaProvider',
            on_event_handler=cognito_template_lambda)
        cognito_fix_resource = CustomResource(
            scope=self,
            id='ExecuteCognitoFix',
            service_token=cognito_lambda_provider.service_token)
        cognito_fix_resource.node.add_dependency(self.__es_domain)
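
# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original example): based on the
# environment variables and IAM permissions wired up above, the CognitoFix
# custom-resource handler ('handler.handler' under
# streaming/streaming_cdk/cognito-lambda) plausibly points the identity
# pool's default authenticated role at the limited role, so users only gain
# broader access through group role mappings. All names below are inferred
# assumptions, not the actual asset shipped with the stack.
import os

import boto3


def handler(event, context):
    # The CDK Provider framework sends Create/Update/Delete events; the role
    # mapping only needs to be (re)applied on create and update.
    if event.get('RequestType') == 'Delete':
        return {}

    region = os.environ['REGION']
    user_pool_id = os.environ['USER_POOL_ID']
    identity_pool_id = os.environ['IDENTITY_POOL_ID']
    limited_role_arn = os.environ['LIMITED_ROLE_ARN']

    # Look up the user pool's app client (needs cognito-idp:ListUserPoolClients).
    idp = boto3.client('cognito-idp', region_name=region)
    client_id = idp.list_user_pool_clients(
        UserPoolId=user_pool_id, MaxResults=1)['UserPoolClients'][0]['ClientId']
    provider = f'cognito-idp.{region}.amazonaws.com/{user_pool_id}:{client_id}'

    # Make the limited role the default authenticated role and let group
    # membership claims in the ID token decide the effective role
    # (needs cognito-identity:SetIdentityPoolRoles and iam:PassRole).
    identity = boto3.client('cognito-identity', region_name=region)
    identity.set_identity_pool_roles(
        IdentityPoolId=identity_pool_id,
        Roles={'authenticated': limited_role_arn},
        RoleMappings={
            provider: {'Type': 'Token', 'AmbiguousRoleResolution': 'Deny'}
        })
    return {}
# --------------------------------------------------------------------------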
Beispiel #30
0
    def __init__(self, scope: Construct, stack_id: str, *, props: SEPStackProps, **kwargs):
        """
        Initialize a new instance of SEPStack
        :param scope: The scope of this construct.
        :param stack_id: The ID of this construct.
        :param props: The properties for this construct (an illustrative
            SEPStackProps sketch follows this example).
        :param kwargs: Any kwargs that need to be passed on to the parent class.
        """
        super().__init__(scope, stack_id, **kwargs)

        # The VPC that all components of the render farm will be created in.
        vpc = Vpc(
            self,
            'Vpc',
            max_azs=2,
        )

        recipes = ThinkboxDockerRecipes(
            self,
            'Image',
            stage=Stage.from_directory(props.docker_recipes_stage_path),
        )

        repository = Repository(
            self,
            'Repository',
            vpc=vpc,
            version=recipes.version,
            repository_installation_timeout=Duration.minutes(20),
            # TODO - Evaluate deletion protection for your own needs. These properties are set to RemovalPolicy.DESTROY
            # to cleanly remove everything when this stack is destroyed. If you would like to ensure
            # that these resources are not accidentally deleted, you should set these properties to RemovalPolicy.RETAIN
            # or just remove the removal_policy parameter.
            removal_policy=RepositoryRemovalPolicies(
                database=RemovalPolicy.DESTROY,
                filesystem=RemovalPolicy.DESTROY,
            ),
        )

        host = 'renderqueue'
        zone_name = 'deadline-test.internal'

        # Internal DNS zone for the VPC.
        dns_zone = PrivateHostedZone(
            self,
            'DnsZone',
            vpc=vpc,
            zone_name=zone_name,
        )

        ca_cert = X509CertificatePem(
            self,
            'RootCA',
            subject=DistinguishedName(
                cn='SampleRootCA',
            ),
        )

        server_cert = X509CertificatePem(
            self,
            'RQCert',
            subject=DistinguishedName(
                cn=f'{host}.{dns_zone.zone_name}',
                o='RFDK-Sample',
                ou='RenderQueueExternal',
            ),
            signing_certificate=ca_cert,
        )

        render_queue = RenderQueue(
            self,
            'RenderQueue',
            vpc=vpc,
            version=recipes.version,
            images=recipes.render_queue_images,
            repository=repository,
            # TODO - Evaluate deletion protection for your own needs. This is set to false to
            # cleanly remove everything when this stack is destroyed. If you would like to ensure
            # that this resource is not accidentally deleted, you should set this to true.
            deletion_protection=False,
            hostname=RenderQueueHostNameProps(
                hostname=host,
                zone=dns_zone,
            ),
            traffic_encryption=RenderQueueTrafficEncryptionProps(
                external_tls=RenderQueueExternalTLSProps(
                    rfdk_certificate=server_cert,
                ),
                internal_protocol=ApplicationProtocol.HTTPS,
            ),
        )

        if props.create_resource_tracker_role:
            # Creates the Resource Tracker Access role. This role is required to exist
            # in your account so the Resource Tracker will work properly.
            Role(
                self,
                'ResourceTrackerRole',
                assumed_by=ServicePrincipal('lambda.amazonaws.com'),
                managed_policies=[
                    ManagedPolicy.from_aws_managed_policy_name(
                        'AWSThinkboxDeadlineResourceTrackerAccessPolicy')
                ],
                role_name='DeadlineResourceTrackerAccessRole',
            )

        fleet = SpotEventPluginFleet(
            self,
            'SpotEventPluginFleet',
            vpc=vpc,
            render_queue=render_queue,
            deadline_groups=['group_name'],
            instance_types=[InstanceType.of(InstanceClass.BURSTABLE3, InstanceSize.LARGE)],
            worker_machine_image=props.worker_machine_image,
            max_capacity=1,
        )

        # Optional: Add additional tags to both spot fleet request and spot instances.
        Tags.of(fleet).add('name', 'SEPtest')

        ConfigureSpotEventPlugin(
            self,
            'ConfigureSpotEventPlugin',
            vpc=vpc,
            render_queue=render_queue,
            spot_fleets=[fleet],
            configuration=SpotEventPluginSettings(
                enable_resource_tracker=True,
            ),
        )
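
# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original example): the stack above
# consumes a SEPStackProps object whose definition is not shown in this
# listing. Inferred from the attributes accessed in __init__
# (docker_recipes_stage_path, worker_machine_image,
# create_resource_tracker_role), a minimal props class could look like the
# sketch below; the field names and types are assumptions.
from dataclasses import dataclass

from aws_cdk.aws_ec2 import IMachineImage


@dataclass
class SEPStackProps:
    """Properties for SEPStack (illustrative sketch)."""
    # Local directory containing the staged Thinkbox Docker recipes.
    docker_recipes_stage_path: str
    # Machine image the Spot Event Plugin worker fleet launches from.
    worker_machine_image: IMachineImage
    # Whether to create the DeadlineResourceTrackerAccessRole in this account.
    create_resource_tracker_role: bool = False

# A hypothetical wiring of the stack into a CDK app could then be:
#
#   app = App()
#   SEPStack(app, 'SEPStack', props=SEPStackProps(
#       docker_recipes_stage_path='stage',
#       worker_machine_image=MachineImage.generic_linux(
#           {'us-west-2': 'ami-0123456789abcdef0'}),
#       create_resource_tracker_role=True,
#   ))
#   app.synth()
# --------------------------------------------------------------------------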