Example #1
 def add_uploads_policy(bucket: aws_s3.Bucket, role: aws_iam.Role):
     policy = aws_iam.PolicyStatement(
         effect=aws_iam.Effect.ALLOW,
         actions=["s3:PutObject"],
         resources=[f"arn:aws:s3:::{bucket.bucket_name}/uploads/*"],
     )
     role.add_to_policy(policy)
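A minimal usage sketch (assumed, not from the original example: the construct IDs and the Lambda service principal are illustrative, and self is the enclosing Stack):

 # Hypothetical wiring inside a Stack's __init__:
 bucket = aws_s3.Bucket(self, "UploadsBucket")
 role = aws_iam.Role(
     self,
     "UploadsRole",
     assumed_by=aws_iam.ServicePrincipal("lambda.amazonaws.com"),
 )
 add_uploads_policy(bucket, role)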
Example #2
    def attach_iam_policies_to_role(cls, role: Role):
        """
        Attach the necessary policies to read secrets from SSM and SecretsManager

        :param role:
        :param zone_id:
        :return:
        """
        # TODO: Extract this in a managed policy
        route53_policy = PolicyStatement(
            resources=["*"],
            effect=Effect.ALLOW,
            actions=[
                "route53:ListHostedZones",
                "route53:ListResourceRecordSets",
            ],
        )
        route53_recordset_policy = PolicyStatement(
            resources=["arn:aws:route53:::hostedzone/*"
                       ],  # To be restricted to interested zone
            effect=Effect.ALLOW,
            actions=[
                "route53:ChangeResourceRecordSets",
                "route53:ListTagsForResource",
            ],
        )
        role.add_to_policy(route53_policy)
        role.add_to_policy(route53_recordset_policy)
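A hedged follow-up to the "to be restricted" comment above, scoping record-set changes to a single zone (the zone ID is a placeholder, not from the original):

        # Sketch only: restrict ChangeResourceRecordSets to one zone.
        zone_id = "Z0000000EXAMPLE"  # hypothetical hosted-zone ID
        scoped_recordset_policy = PolicyStatement(
            resources=[f"arn:aws:route53:::hostedzone/{zone_id}"],
            effect=Effect.ALLOW,
            actions=["route53:ChangeResourceRecordSets"],
        )
        role.add_to_policy(scoped_recordset_policy)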
Example #3
    def attach_iam_policies_to_role(cls, role: Role):
        """
        Attach the inline policies necessary to manage autoscaling using the kubernetes cluster autoscaler

        :param role:
        :return:
        """
        # TODO: Extract this in a managed policy
        policies: Dict[str, PolicyStatement] = {
            'cluster_autoscaler':
            PolicyStatement(
                resources=["*"],
                effect=Effect.ALLOW,
                actions=[
                    "autoscaling:DescribeAutoScalingGroups",
                    "autoscaling:DescribeAutoScalingInstances",
                    "autoscaling:DescribeLaunchConfigurations",
                    "autoscaling:DescribeTags",
                    "autoscaling:SetDesiredCapacity",
                    "autoscaling:TerminateInstanceInAutoScalingGroup",
                    "ec2:DescribeLaunchTemplateVersions",
                ]),
        }

        for policy in policies.values():
            role.add_to_policy(policy)
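One way to act on the TODO above, sketched under the assumption that a construct scope is available (the variable scope and the construct ID are illustrative, not part of the original snippet):

        # Sketch: extract the same statements into a customer-managed policy.
        autoscaler_policy = ManagedPolicy(
            scope,
            "ClusterAutoscalerPolicy",
            statements=list(policies.values()),
        )
        role.add_managed_policy(autoscaler_policy)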
Example #4
 def add_user_specific_policy(bucket: aws_s3.Bucket, role: aws_iam.Role, prefix: str):
     policy = aws_iam.PolicyStatement(
         effect=aws_iam.Effect.ALLOW,
         actions=["s3:GetObject", "s3:PutObject", "s3:DeleteObject"],
         resources=[
             f"arn:aws:s3:::{bucket.bucket_name}/{prefix}/${{cognito-identity.amazonaws.com:sub}}/*"  # noqa: E501
         ],
     )
     role.add_to_policy(policy)
Example #5
 def add_public_policy(bucket: aws_s3.Bucket, role: aws_iam.Role, is_auth_role: bool):
     actions = ["s3:GetObject"]
     if is_auth_role:
         actions.extend(["s3:PutObject", "s3:DeleteObject"])
     policy = aws_iam.PolicyStatement(
         effect=aws_iam.Effect.ALLOW,
         actions=actions,
         resources=[f"arn:aws:s3:::{bucket.bucket_name}/public/*"],
     )
     role.add_to_policy(policy)
Example #6
    def _configure_mutual_assume_role(self, role: iam.Role):
        # Statements that carry principals belong in a trust policy, not in
        # an identity policy, so each statement is added to the other role's
        # assume-role policy document (add_to_policy would produce an
        # invalid identity-policy statement here).
        self._roles.instance_role.assume_role_policy.add_statements(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                principals=[iam.ArnPrincipal(role.role_arn)],
                                actions=['sts:AssumeRole']))

        role.assume_role_policy.add_statements(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                principals=[
                                    iam.ArnPrincipal(
                                        self._roles.instance_role.role_arn)
                                ],
                                actions=['sts:AssumeRole']))
Example #7
    def add_list_policy(bucket: aws_s3.Bucket, role: aws_iam.Role,
                        is_auth_role: bool):
        policy = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=["s3:ListBucket"],
            resources=[f"arn:aws:s3:::{bucket.bucket_name}"],
        )

        prefixes = ["public/", "public/*", "protected/", "protected/*"]
        if is_auth_role:
            prefixes.extend([
                "private/${cognito-identity.amazonaws.com:sub}/",
                "private/${cognito-identity.amazonaws.com:sub}/*",
            ])
        policy.add_conditions({"StringLike": {"s3:prefix": prefixes}})
        role.add_to_policy(policy)
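Note on the statement above: s3:ListBucket is evaluated against the bucket ARN itself rather than object ARNs, so the per-folder narrowing has to happen through the StringLike condition on s3:prefix; the public/protected/private prefixes mirror the Amplify-style storage layout.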
Example #8
    def __role(self) -> Role:
        """
        A role for custom resource which manages git commits to codecommit.

        :return: Custom resource's role.
        """
        return Role(
            self.__stack,
            self.__prefix + 'CiCdLambdaCustomCommitRole',
            inline_policies={
                self.__prefix + 'CiCdLambdaCustomCommitPolicy':
                PolicyDocument(statements=[
                    PolicyStatement(
                        actions=[
                            "codecommit:CreateCommit",
                        ],
                        resources=[self.__code_repository.repository_arn],
                        effect=Effect.ALLOW),
                    PolicyStatement(actions=[
                        "logs:CreateLogGroup", "logs:CreateLogStream",
                        "logs:PutLogEvents"
                    ],
                                    resources=['*'],
                                    effect=Effect.ALLOW)
                ])
            },
            assumed_by=ServicePrincipal('lambda.amazonaws.com'))
Example #9
    def __init__(self, scope: BaseApp, id: str, vpc: Vpc, env_fqdn: str,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        kubernetes_version = scope.environment_config.get(
            'eks', {}).get('kubernetesVersion')
        cluster_name = scope.prefixed_str(
            scope.environment_config.get('eks', {}).get('clusterName'))

        cluster_admin_role = Role(
            self,
            scope.prefixed_str('EKS-AdminRole'),
            role_name=scope.prefixed_str('EKS-AdminRole'),
            assumed_by=AccountRootPrincipal(),
        )

        self.__cluster = eks_cluster = Cluster(
            self,
            cluster_name,
            cluster_name=cluster_name,
            vpc=vpc,
            version=KubernetesVersion.of(kubernetes_version),
            default_capacity=0,  # The capacity is defined later
            masters_role=cluster_admin_role,
            vpc_subnets=self._get_control_plane_subnets(
                scope),  # Control plane subnets
        )

        for profile in scope.environment_config.get('eks', {}).get(
                'fargateProfiles', []):
            eks_cluster.add_fargate_profile(
                profile.get('name'),
                selectors=[
                    Selector(namespace=profile.get('namespace'),
                             labels=profile.get('labels'))
                ])

        asg_fleets = []
        for fleet in scope.environment_config.get(
                'eks', {}).get('workerNodesFleets', []):
            if fleet.get('type') == 'managed':
                self.add_managed_fleet(eks_cluster, fleet)
            if fleet.get('type') == 'ASG':
                asg_fleets += self.add_asg_fleet(scope, eks_cluster, fleet)

        self._enable_cross_fleet_communication(asg_fleets)

        # Base cluster applications
        MetricsServer.add_to_cluster(eks_cluster)
        ClusterAutoscaler.add_to_cluster(eks_cluster, kubernetes_version)
        ExternalSecrets.add_to_cluster(eks_cluster)
        CertManager.add_to_cluster(eks_cluster)

        # Monitoring applications
        PrometheusOperator.add_to_cluster(eks_cluster)
        Grafana.add_to_cluster(eks_cluster, env_fqdn)

        # Logging & tracing applications
        Fluentd.add_to_cluster(eks_cluster)
        Loki.add_to_cluster(eks_cluster)
Example #10
    def _create_s3_access_role(self, identity_pool: CfnIdentityPool,
                               s3_bucket: Bucket) -> Role:
        role = Role(self,
                    'DemoRole',
                    role_name='CognitoDemoBucketAccess',
                    assumed_by=WebIdentityPrincipal(
                        'cognito-identity.amazonaws.com',
                        conditions={
                            'StringEquals': {
                                'cognito-identity.amazonaws.com:aud':
                                identity_pool.ref
                            }
                        }),
                    inline_policies={
                        'ListBucket':
                        PolicyDocument(statements=[
                            PolicyStatement(effect=Effect.ALLOW,
                                            actions=['s3:ListBucket'],
                                            resources=[s3_bucket.bucket_arn])
                        ])
                    })

        CfnOutput(self, 'ROLE_ARN', value=role.role_arn)

        return role
Example #11
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        with open(INPUT_WORKFLOW_FILE) as f:
            workflow_dict = json.load(f)
            glue_role = Role(
                self,
                "dummy_glue_job_role",
                assumed_by=ServicePrincipal("glue.amazonaws.com"),
                managed_policies=[
                    ManagedPolicy.from_aws_managed_policy_name(
                        "service-role/AWSGlueServiceRole")
                ],
            )
            # mock job
            job_command = CfnJob.JobCommandProperty(
                name='pythonshell', script_location=SCRIPT_LOCATION)

            for workflow_item in workflow_dict:
                for node in workflow_item['nodes']:
                    triggered_job_name = node['id']
                    job = CfnJob(self,
                                 triggered_job_name,
                                 name=triggered_job_name,
                                 command=job_command,
                                 role=glue_role.role_name)
Example #12
File: main.py Project: jqbx-bot/bot
 def __init__(self, scope: Construct, _id: str, **kwargs) -> None:
     super().__init__(scope, _id, **kwargs)
     task_definition = FargateTaskDefinition(
         self,
         'TaskDefinition',
         cpu=256,
         memory_limit_mib=512,
         execution_role=Role(
             self,
             'ExecutionRole',
             assumed_by=cast(IPrincipal, ServicePrincipal('ecs-tasks.amazonaws.com'))
         ),
         task_role=Role(
             self,
             'TaskRole',
             assumed_by=cast(IPrincipal, ServicePrincipal('ecs-tasks.amazonaws.com')),
             managed_policies=[
                 ManagedPolicy.from_aws_managed_policy_name('AmazonSESFullAccess')
             ]
         )
     )
     task_definition.add_container(
         'Container',
         image=ContainerImage.from_asset(
             getcwd(),
             file='Dockerfile',
             repository_name='jqbx-bot',
             exclude=['cdk.out']
         ),
         command=['pipenv', 'run', 'python', '-u', '-m', 'src.main'],
         environment={
             'SPOTIFY_USER_ID': environ.get('SPOTIFY_USER_ID'),
             'JQBX_ROOM_ID': environ.get('JQBX_ROOM_ID'),
             'JQBX_BOT_DISPLAY_NAME': environ.get('JQBX_BOT_DISPLAY_NAME'),
             'JQBX_BOT_IMAGE_URL': environ.get('JQBX_BOT_IMAGE_URL'),
             'DATA_SERVICE_BASE_URL': environ.get('DATA_SERVICE_BASE_URL')
         },
         logging=AwsLogDriver(
             stream_prefix='jqbx-bot',
             log_group=LogGroup(self, 'LogGroup')
         )
     )
     cluster = Cluster(self, '%sCluster' % _id)
     FargateService(self, '%sService' % _id, cluster=cluster, task_definition=task_definition, desired_count=1)
Example #13
    def create_role(self):
        service_principal = ServicePrincipal('lambda.amazonaws.com')

        return Role(
            self,
            'Role',
            assumed_by=service_principal,
            role_name='sfn_lambda_role',
            managed_policies=[self.managed_policy],
        )
Example #14
    def attach_iam_policies_to_role(cls, role: Role):
        """
        Attach the necessary policies to read secrets from SSM and SecretsManager

        :param role:
        :return:
        """
        # TODO: Extract this in a managed policy
        secretsmanager_readonly_policy = PolicyStatement(
            resources=["*"],
            effect=Effect.ALLOW,
            actions=[
                "secretsmanager:GetResourcePolicy",
                "secretsmanager:GetSecretValue",
                "secretsmanager:DescribeSecret",
                "secretsmanager:ListSecretVersionIds",
            ]
        )
        role.add_to_policy(secretsmanager_readonly_policy)
        role.add_managed_policy(ManagedPolicy.from_aws_managed_policy_name('AmazonSSMReadOnlyAccess'))
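Since resources=["*"] lets the role read every secret in the account, a tighter variant would enumerate specific secret ARNs. A sketch, assuming a Secret construct named db_secret is in scope (hypothetical, not part of the original):

        secretsmanager_readonly_policy = PolicyStatement(
            resources=[db_secret.secret_arn],  # hypothetical ISecret
            effect=Effect.ALLOW,
            actions=["secretsmanager:GetSecretValue"],
        )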
Example #15
    def _create_lambdas(self):
        clean_pycache()

        for root, dirs, files in os.walk(LAMBDAS_DIR):
            for f in files:
                if f != "__init__.py":
                    continue

                parent_folder = os.path.basename(os.path.dirname(root))
                lambda_folder = os.path.basename(root)
                name = f"{parent_folder}-{lambda_folder}"
                lambda_config = self.lambdas_config[name]

                layers = []
                for layer_name in lambda_config["layers"]:
                    layers.append(self.layers[layer_name])

                lambda_role = Role(
                    self,
                    f"{name}_role",
                    assumed_by=ServicePrincipal(service="lambda.amazonaws.com")
                )
                for policy in lambda_config["policies"]:
                    lambda_role.add_to_policy(policy)
                lambda_role.add_managed_policy(
                    ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole"))

                lambda_args = {
                    "code": Code.from_asset(root),
                    "handler": "__init__.handle",
                    "runtime": Runtime.PYTHON_3_8,
                    "layers": layers,
                    "function_name": name,
                    "environment": lambda_config["variables"],
                    "role": lambda_role,
                    "timeout": Duration.seconds(lambda_config["timeout"]),
                    "memory_size": lambda_config["memory"],
                }
                if "concurrent_executions" in lambda_config:
                    lambda_args["reserved_concurrent_executions"] = lambda_config["concurrent_executions"]

                self.lambdas[name] = Function(self, name, **lambda_args)

        self.lambdas["sqs_handlers-post_anime"].add_event_source(SqsEventSource(self.post_anime_queue))

        Rule(
            self,
            "titles_updater",
            schedule=Schedule.cron(hour="2", minute="10"),
            targets=[LambdaFunction(self.lambdas["crons-titles_updater"])]
        )
        Rule(
            self,
            "episodes_updater",
            schedule=Schedule.cron(hour="4", minute="10"),
            targets=[LambdaFunction(self.lambdas["crons-episodes_updater"])]
        )
Example #16
    def create_default_infrastructure_config(
            self, construct_id: str) -> CfnInfrastructureConfiguration:
        """
        Create the default infrastructure config, which defines the permissions needed by Image Builder during
        image creation.
        """
        image_builder_role_name = f"DeadlineMachineImageBuilderRole{construct_id}"
        image_builder_role = Role(
            self,
            image_builder_role_name,
            assumed_by=ServicePrincipal("ec2.amazonaws.com"),
            role_name=image_builder_role_name)
        image_builder_role.add_managed_policy(
            ManagedPolicy.from_aws_managed_policy_name(
                'EC2InstanceProfileForImageBuilder'))
        image_builder_role.add_managed_policy(
            ManagedPolicy.from_aws_managed_policy_name(
                'AmazonSSMManagedInstanceCore'))

        image_builder_role.add_to_policy(
            PolicyStatement(actions=[
                's3:Get*',
                's3:List*',
            ],
                            resources=['arn:aws:s3:::thinkbox-installers/*']))

        image_builder_profile_name = f"DeadlineMachineImageBuilderPolicy{construct_id}"
        image_builder_profile = CfnInstanceProfile(
            self,
            image_builder_profile_name,
            instance_profile_name=image_builder_profile_name,
            roles=[image_builder_role_name])
        image_builder_profile.add_depends_on(
            image_builder_role.node.default_child)

        infrastructure_configuration = CfnInfrastructureConfiguration(
            self,
            f"InfrastructureConfig{construct_id}",
            name=f"DeadlineInfrastructureConfig{construct_id}",
            instance_profile_name=image_builder_profile_name)
        infrastructure_configuration.add_depends_on(image_builder_profile)

        return infrastructure_configuration
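A note on the depends_on wiring above: add_depends_on accepts only CloudFormation-level (Cfn*) resources, which is why the role's node.default_child (its underlying CfnRole) is passed rather than the Role construct itself.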
Example #17
 def __init__(self, scope):
     super().__init__(scope, "bug")
     bucket = Bucket.from_bucket_name(
         self, "artifacts", core.Fn.import_value("CodeArtifactsBucket")
     )
     pipeline_role = Role.from_role_arn(
         self, "pipeline", core.Fn.import_value("CodePipelineRole")
     )
     pipeline = Pipeline(
         self,
         "Pipeline",
         artifact_bucket=bucket,
         role=pipeline_role,
         stages=[
             StageProps(
                 stage_name="Source",
                 actions=[
                     GitHubSourceAction(
                         action_name="Source",
                         run_order=1,
                         oauth_token=core.SecretValue("something"),
                         output=Artifact(artifact_name="SourceArtifact"),
                         owner="me",
                         repo="repo",
                         branch="master",
                     )
                 ],
             )
         ],
     )
     pipeline.add_stage(
         stage_name="Fails",
         actions=[
             LambdaInvokeAction(
                 action_name="LambdaInvokeAction",
                 run_order=1,
                 lambda_=Function.from_function_arn(
                     self, "function", core.Fn.import_value("SomeFunction")
                 ),
             )
         ],
     )
Example #18
    def role(self) -> Role:
        inline_policies = {
            "ElasticsearchIndexPrividerFunctionElasticsearchAccessPolicy":
            PolicyDocument(statements=[
                PolicyStatement(
                    # TODO restrict this to appropriate API calls.
                    actions=["es:*"],
                    resources=["*"],
                )
            ]),
        }

        return Role(
            scope=self.__scope,
            id=f"{self.__name}ElasticsearchIndexResourceProviderRole",
            assumed_by=ServicePrincipal("lambda.amazonaws.com"),
            description=
            "A role for the ElasticsearchIndexResourceProvider Lambda function.",
            inline_policies=inline_policies,
        )
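For the TODO above, a hedged narrowing that reuses the es:ESHttp* action list appearing in other examples on this page; the domain variable is an assumption, not part of the original:

            PolicyStatement(
                # Sketch: replace es:* with explicit HTTP-level actions.
                actions=[
                    "es:ESHttpDelete",
                    "es:ESHttpGet",
                    "es:ESHttpHead",
                    "es:ESHttpPost",
                    "es:ESHttpPut",
                ],
                resources=[f"{domain.domain_arn}/*"],  # hypothetical domain
            )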
Example #19
    def __init__(self, scope: Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        global g_lambda_edge

        Tags.of(self).add("Stack", "Common-Lambda-Edge")

        self._role = Role(
            self,
            "EdgeLambdaRole",
            assumed_by=CompositePrincipal(
                ServicePrincipal("lambda.amazonaws.com"),
                ServicePrincipal("edgelambda.amazonaws.com"),
            ),
            managed_policies=[
                ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AWSLambdaBasicExecutionRole"),
            ],
        )

        if g_lambda_edge is not None:
            raise Exception("Only a single LambdaEdgeStack instance can exist")
        g_lambda_edge = self
Example #20
    def _create_lambdas(self):
        for root, dirs, files in os.walk(LAMBDAS_DIR):
            for f in files:
                if f != "__init__.py":
                    continue

                parent_folder = os.path.basename(os.path.dirname(root))
                lambda_folder = os.path.basename(root)
                name = f"{parent_folder}-{lambda_folder}"
                lambda_config = self.lambdas_config[name]

                layers = []
                for layer_name in lambda_config["layers"]:
                    layers.append(self.layers[layer_name])

                lambda_role = Role(self,
                                   f"{name}_role",
                                   assumed_by=ServicePrincipal(
                                       service="lambda.amazonaws.com"))
                for policy in lambda_config["policies"]:
                    lambda_role.add_to_policy(policy)
                lambda_role.add_managed_policy(
                    ManagedPolicy.from_aws_managed_policy_name(
                        "service-role/AWSLambdaBasicExecutionRole"))

                self.lambdas[name] = Function(
                    self,
                    name,
                    code=Code.from_asset(root),
                    handler="__init__.handle",
                    runtime=Runtime.PYTHON_3_8,
                    layers=layers,
                    function_name=name,
                    environment=lambda_config["variables"],
                    role=lambda_role,
                    timeout=Duration.seconds(lambda_config["timeout"]),
                    memory_size=lambda_config["memory"],
                )

        Rule(self,
             "update_eps",
             schedule=Schedule.cron(hour="2", minute="10"),
             targets=[LambdaFunction(self.lambdas["cron-update_eps"])])
Example #21
    def __init__(self,
                 scope: Construct,
                 id: str,
                 elasticsearch_index: ElasticsearchIndexResource,
                 dynamodb_table: Table,
                 kms_key: Optional[Key] = None,
                 *,
                 sagemaker_endpoint_name: str = None,
                 sagemaker_endpoint_arn: str = None,
                 sagemaker_embeddings_key: str = None) -> None:
        super().__init__(scope=scope, id=id)

        elasticsearch_layer = BElasticsearchLayer(
            scope=self, name=f"{id}ElasticsearchLayer")

        if bool(sagemaker_endpoint_name) ^ bool(sagemaker_embeddings_key):
            raise ValueError(
                'In order to use sentence embedding, all of the following environment variables are required: '
                'SAGEMAKER_ENDPOINT_NAME, SAGEMAKER_EMBEDDINGS_KEY. '
                'Otherwise, provide none of the above.')

        if sagemaker_endpoint_name and not sagemaker_endpoint_arn:
            sagemaker_endpoint_arn = self.__resolve_sagemaker_endpoints_arn(
                '*')

        optional_sagemaker_parameters = {
            'SAGEMAKER_ENDPOINT_NAME': sagemaker_endpoint_name or None,
            'SAGEMAKER_EMBEDDINGS_KEY': sagemaker_embeddings_key or None
        }

        initial_cloner_function = SingletonFunction(
            scope=self,
            id='InitialClonerFunction',
            uuid='e01116a4-f939-43f2-8f5b-cc9f862c9e01',
            lambda_purpose='InitialClonerSingletonLambda',
            code=Code.from_asset(initial_cloner_root),
            handler='index.handler',
            runtime=Runtime.PYTHON_3_8,
            layers=[elasticsearch_layer],
            log_retention=RetentionDays.ONE_MONTH,
            memory_size=128,
            timeout=Duration.minutes(15),
            role=Role(
                scope=self,
                id='InitialClonerFunctionRole',
                assumed_by=ServicePrincipal('lambda.amazonaws.com'),
                inline_policies={
                    'LogsPolicy':
                    PolicyDocument(statements=[
                        PolicyStatement(
                            actions=[
                                'logs:CreateLogGroup',
                                'logs:CreateLogStream',
                                'logs:PutLogEvents',
                                'logs:DescribeLogStreams',
                            ],
                            resources=['arn:aws:logs:*:*:*'],
                            effect=Effect.ALLOW,
                        )
                    ]),
                    'ElasticsearchPolicy':
                    PolicyDocument(statements=[
                        PolicyStatement(
                            actions=[
                                'es:ESHttpDelete',
                                'es:ESHttpGet',
                                'es:ESHttpHead',
                                'es:ESHttpPatch',
                                'es:ESHttpPost',
                                'es:ESHttpPut',
                            ],
                            resources=['*'],
                            effect=Effect.ALLOW,
                        )
                    ]),
                    'DynamodbPolicy':
                    PolicyDocument(statements=[
                        PolicyStatement(
                            actions=['dynamodb:*'],
                            resources=['*'],
                            effect=Effect.ALLOW,
                        )
                    ]),
                },
                description='Role for DynamoDB Initial Cloner Function',
            ),
        )

        if kms_key:
            initial_cloner_function.add_to_role_policy(
                PolicyStatement(
                    actions=['kms:Decrypt'],
                    resources=[kms_key.key_arn],
                    effect=Effect.ALLOW,
                ), )

        initial_cloner = CustomResource(
            scope=self,
            id='InitialCloner',
            service_token=initial_cloner_function.function_arn,
            removal_policy=RemovalPolicy.DESTROY,
            properties={
                'DynamodbTableName':
                dynamodb_table.table_name,
                'ElasticsearchIndexName':
                elasticsearch_index.index_name,
                'ElasticsearchEndpoint':
                elasticsearch_index.elasticsearch_domain.domain_endpoint,
            },
            resource_type='Custom::ElasticsearchInitialCloner',
        )

        primary_key_field = initial_cloner.get_att_string('PrimaryKeyField')

        dynamodb_stream_arn = dynamodb_table.table_stream_arn
        if not dynamodb_stream_arn:
            raise Exception('DynamoDB streams must be enabled for the table')

        dynamodb_event_source = DynamoEventSource(
            table=dynamodb_table,
            starting_position=StartingPosition.LATEST,
            enabled=True,
            max_batching_window=Duration.seconds(10),
            bisect_batch_on_error=True,
            parallelization_factor=2,
            batch_size=1000,
            retry_attempts=10,
        )

        cloner_inline_policies = {
            'LogsPolicy':
            PolicyDocument(statements=[
                PolicyStatement(
                    actions=[
                        'logs:CreateLogGroup',
                        'logs:CreateLogStream',
                        'logs:PutLogEvents',
                        'logs:DescribeLogStreams',
                    ],
                    resources=['arn:aws:logs:*:*:*'],
                    effect=Effect.ALLOW,
                )
            ]),
            'ElasticsearchPolicy':
            PolicyDocument(statements=[
                PolicyStatement(
                    actions=[
                        'es:ESHttpDelete',
                        'es:ESHttpGet',
                        'es:ESHttpHead',
                        'es:ESHttpPatch',
                        'es:ESHttpPost',
                        'es:ESHttpPut',
                    ],
                    resources=[
                        f'{elasticsearch_index.elasticsearch_domain.domain_arn}/*'
                    ],
                    effect=Effect.ALLOW,
                )
            ]),
            'DynamodbStreamsPolicy':
            PolicyDocument(statements=[
                PolicyStatement(
                    actions=[
                        'dynamodb:DescribeStream',
                        'dynamodb:GetRecords',
                        'dynamodb:GetShardIterator',
                        'dynamodb:ListStreams',
                    ],
                    resources=[dynamodb_stream_arn],
                    effect=Effect.ALLOW,
                )
            ]),
        }

        if sagemaker_endpoint_arn:
            cloner_inline_policies['SagemakerPolicy'] = PolicyDocument(
                statements=[
                    PolicyStatement(actions=['sagemaker:InvokeEndpoint'],
                                    resources=[sagemaker_endpoint_arn],
                                    effect=Effect.ALLOW)
                ])

        cloner_function = Function(
            scope=self,
            id='ClonerFunction',
            code=Code.from_asset(cloner_root),
            handler='index.handler',
            runtime=Runtime.PYTHON_3_8,
            environment={
                'ES_INDEX_NAME': elasticsearch_index.index_name,
                'ES_DOMAIN_ENDPOINT':
                elasticsearch_index.elasticsearch_domain.domain_endpoint,
                'PRIMARY_KEY_FIELD': primary_key_field,
                # Included only when both SageMaker variables are set.
                **(optional_sagemaker_parameters
                   if all(optional_sagemaker_parameters.values()) else {})
            },
            events=[dynamodb_event_source],
            layers=[elasticsearch_layer],
            log_retention=RetentionDays.ONE_MONTH,
            memory_size=128,
            role=Role(
                scope=self,
                id='ClonerFunctionRole',
                assumed_by=ServicePrincipal('lambda.amazonaws.com'),
                inline_policies=cloner_inline_policies,
                description='Role for DynamoDB Cloner Function',
            ),
            timeout=Duration.seconds(30),
        )

        if kms_key:
            cloner_function.add_to_role_policy(
                PolicyStatement(
                    actions=['kms:Decrypt'],
                    resources=[kms_key.key_arn],
                    effect=Effect.ALLOW,
                ))
Example #22
    def __init__(self,
                 scope: Stack,
                 id: str,
                 capacity: Optional[AddCapacityOptions] = None,
                 cluster_name: Optional[str] = None,
                 container_insights: Optional[bool] = None,
                 default_cloud_map_namespace: Optional[
                     CloudMapNamespaceOptions] = None,
                 vpc: Optional[IVpc] = None,
                 **kwargs) -> None:
        known_args = dict(
            scope=scope,
            id=id,
            capacity=capacity,
            cluster_name=cluster_name,
            container_insights=container_insights,
            default_cloud_map_namespace=default_cloud_map_namespace,
            vpc=vpc)

        unknown_args = kwargs

        super().__init__(**{**known_args, **unknown_args})

        self.__role = Role(
            scope=scope,
            id=cluster_name + 'CustomResourceRole',
            role_name=cluster_name + 'CustomResourceRole',
            assumed_by=CompositePrincipal(
                ServicePrincipal("lambda.amazonaws.com"),
                ServicePrincipal("cloudformation.amazonaws.com")),
            inline_policies={
                cluster_name + 'CustomResourcePolicy':
                PolicyDocument(statements=[
                    PolicyStatement(actions=[
                        "ecs:ListClusters",
                        "ecs:ListContainerInstances",
                        "ecs:ListServices",
                        "ecs:ListTaskDefinitions",
                        "ecs:ListTasks",
                        "ecs:DescribeClusters",
                        "ecs:DescribeContainerInstances",
                        "ecs:DescribeServices",
                        "ecs:DescribeTaskDefinition",
                        "ecs:DescribeTasks",
                        "ecs:CreateCluster",
                        "ecs:DeleteCluster",
                        "ecs:DeleteService",
                        "ecs:DeregisterContainerInstance",
                        "ecs:DeregisterTaskDefinition",
                        "ecs:StopTask",
                        "ecs:UpdateService",
                    ],
                                    effect=Effect.ALLOW,
                                    resources=['*']),
                    PolicyStatement(actions=[
                        "logs:CreateLogGroup", "logs:CreateLogStream",
                        "logs:PutLogEvents"
                    ],
                                    effect=Effect.ALLOW,
                                    resources=['*']),
                ])
            },
            managed_policies=[])

        self.__custom_backend = Function(
            scope=scope,
            id=cluster_name + 'Deleter',
            code=Code.from_asset(path=package_root),
            handler='index.handler',
            runtime=Runtime.PYTHON_3_6,
            description=
            f'A custom resource backend to delete ecs cluster ({cluster_name}) in the right way.',
            function_name=cluster_name + 'Deleter',
            memory_size=128,
            role=self.__role,
            timeout=Duration.seconds(900),
        )

        # noinspection PyTypeChecker
        provider: ICustomResourceProvider = CustomResourceProvider.from_lambda(
            self.__custom_backend)

        self.__custom_resource = CustomResource(
            scope=scope,
            id=cluster_name + 'CustomResource',
            provider=provider,
            removal_policy=RemovalPolicy.DESTROY,
            properties={'clusterName': cluster_name},
            resource_type='Custom::EmptyS3Bucket')

        # Make sure that custom resource is deleted before lambda function backend.
        self.__custom_resource.node.add_dependency(self.__custom_backend)
        # Make sure that custom resource is deleted before the bucket.
        self.__custom_resource.node.add_dependency(self)
Example #23
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        table_name = 'items'

        items_graphql_api = CfnGraphQLApi(self,
                                          'ItemsApi',
                                          name='items-api',
                                          authentication_type='API_KEY')

        CfnApiKey(self, 'ItemsApiKey', api_id=items_graphql_api.attr_api_id)

        api_schema = CfnGraphQLSchema(self,
                                      'ItemsSchema',
                                      api_id=items_graphql_api.attr_api_id,
                                      definition=f"""\
                type {table_name} {{
                    {table_name}Id: ID!
                    name: String
                }}
                type Paginated{table_name} {{
                    items: [{table_name}!]!
                    nextToken: String
                }}
                type Query {{
                    all(limit: Int, nextToken: String): Paginated{table_name}!
                    getOne({table_name}Id: ID!): {table_name}
                }}
                type Mutation {{
                    save(name: String!): {table_name}
                    delete({table_name}Id: ID!): {table_name}
                }}
                type Schema {{
                    query: Query
                    mutation: Mutation
                }}""")

        items_table = Table(
            self,
            'ItemsTable',
            table_name=table_name,
            partition_key=Attribute(name=f'{table_name}Id',
                                    type=AttributeType.STRING),
            billing_mode=BillingMode.PAY_PER_REQUEST,
            stream=StreamViewType.NEW_IMAGE,

            # The default removal policy is RETAIN, which means that cdk
            # destroy will not attempt to delete the new table, and it will
            # remain in your account until manually deleted. By setting the
            # policy to DESTROY, cdk destroy will delete the table (even if it
            # has data in it)
            removal_policy=core.RemovalPolicy.
            DESTROY  # NOT recommended for production code
        )

        items_table_role = Role(
            self,
            'ItemsDynamoDBRole',
            assumed_by=ServicePrincipal('appsync.amazonaws.com'))

        items_table_role.add_managed_policy(
            ManagedPolicy.from_aws_managed_policy_name(
                'AmazonDynamoDBFullAccess'))

        data_source = CfnDataSource(
            self,
            'ItemsDataSource',
            api_id=items_graphql_api.attr_api_id,
            name='ItemsDynamoDataSource',
            type='AMAZON_DYNAMODB',
            dynamo_db_config=CfnDataSource.DynamoDBConfigProperty(
                table_name=items_table.table_name, aws_region=self.region),
            service_role_arn=items_table_role.role_arn)

        get_one_resolver = CfnResolver(
            self,
            'GetOneQueryResolver',
            api_id=items_graphql_api.attr_api_id,
            type_name='Query',
            field_name='getOne',
            data_source_name=data_source.name,
            request_mapping_template=f"""\
            {{
                "version": "2017-02-28",
                "operation": "GetItem",
                "key": {{
                "{table_name}Id": $util.dynamodb.toDynamoDBJson($ctx.args.{table_name}Id)
                }}
            }}""",
            response_mapping_template="$util.toJson($ctx.result)")

        get_one_resolver.add_depends_on(api_schema)

        get_all_resolver = CfnResolver(
            self,
            'GetAllQueryResolver',
            api_id=items_graphql_api.attr_api_id,
            type_name='Query',
            field_name='all',
            data_source_name=data_source.name,
            request_mapping_template=f"""\
            {{
                "version": "2017-02-28",
                "operation": "Scan",
                "limit": $util.defaultIfNull($ctx.args.limit, 20),
                "nextToken": $util.toJson($util.defaultIfNullOrEmpty($ctx.args.nextToken, null))
            }}""",
            response_mapping_template="$util.toJson($ctx.result)")

        get_all_resolver.add_depends_on(api_schema)

        save_resolver = CfnResolver(
            self,
            'SaveMutationResolver',
            api_id=items_graphql_api.attr_api_id,
            type_name='Mutation',
            field_name='save',
            data_source_name=data_source.name,
            request_mapping_template=f"""\
            {{
                "version": "2017-02-28",
                "operation": "PutItem",
                "key": {{
                    "{table_name}Id": {{ "S": "$util.autoId()" }}
                }},
                "attributeValues": {{
                    "name": $util.dynamodb.toDynamoDBJson($ctx.args.name)
                }}
            }}""",
            response_mapping_template="$util.toJson($ctx.result)")

        save_resolver.add_depends_on(api_schema)

        delete_resolver = CfnResolver(
            self,
            'DeleteMutationResolver',
            api_id=items_graphql_api.attr_api_id,
            type_name='Mutation',
            field_name='delete',
            data_source_name=data_source.name,
            request_mapping_template=f"""\
            {{
                "version": "2017-02-28",
                "operation": "DeleteItem",
                "key": {{
                "{table_name}Id": $util.dynamodb.toDynamoDBJson($ctx.args.{table_name}Id)
                }}
            }}""",
            response_mapping_template="$util.toJson($ctx.result)")

        delete_resolver.add_depends_on(api_schema)
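One caveat worth noting: the resolvers above only declare a dependency on the schema while referencing the data source by name, so adding a second dependency per resolver (a judgment call, not in the original) can avoid a creation-order race:

        get_one_resolver.add_depends_on(data_source)  # likewise for the others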
Example #24
    def __init__(self, scope: Construct, stack_id: str, *, props: SEPStackProps, **kwargs):
        """
        Initialize a new instance of SEPStack
        :param scope: The scope of this construct.
        :param stack_id: The ID of this construct.
        :param props: The properties for this construct.
        :param kwargs: Any kwargs that need to be passed on to the parent class.
        """
        super().__init__(scope, stack_id, **kwargs)

        # The VPC that all components of the render farm will be created in.
        vpc = Vpc(
            self,
            'Vpc',
            max_azs=2,
        )

        recipes = ThinkboxDockerRecipes(
            self,
            'Image',
            stage=Stage.from_directory(props.docker_recipes_stage_path),
        )

        repository = Repository(
            self,
            'Repository',
            vpc=vpc,
            version=recipes.version,
            repository_installation_timeout=Duration.minutes(20),
            # TODO - Evaluate deletion protection for your own needs. These properties are set to RemovalPolicy.DESTROY
            # to cleanly remove everything when this stack is destroyed. If you would like to ensure
            # that these resources are not accidentally deleted, you should set these properties to RemovalPolicy.RETAIN
            # or just remove the removal_policy parameter.
            removal_policy=RepositoryRemovalPolicies(
                database=RemovalPolicy.DESTROY,
                filesystem=RemovalPolicy.DESTROY,
            ),
        )

        host = 'renderqueue'
        zone_name = 'deadline-test.internal'

        # Internal DNS zone for the VPC.
        dns_zone = PrivateHostedZone(
            self,
            'DnsZone',
            vpc=vpc,
            zone_name=zone_name,
        )

        ca_cert = X509CertificatePem(
            self,
            'RootCA',
            subject=DistinguishedName(
                cn='SampleRootCA',
            ),
        )

        server_cert = X509CertificatePem(
            self,
            'RQCert',
            subject=DistinguishedName(
                cn=f'{host}.{dns_zone.zone_name}',
                o='RFDK-Sample',
                ou='RenderQueueExternal',
            ),
            signing_certificate=ca_cert,
        )

        render_queue = RenderQueue(
            self,
            'RenderQueue',
            vpc=vpc,
            version=recipes.version,
            images=recipes.render_queue_images,
            repository=repository,
            # TODO - Evaluate deletion protection for your own needs. This is set to false to
            # cleanly remove everything when this stack is destroyed. If you would like to ensure
            # that this resource is not accidentally deleted, you should set this to true.
            deletion_protection=False,
            hostname=RenderQueueHostNameProps(
                hostname=host,
                zone=dns_zone,
            ),
            traffic_encryption=RenderQueueTrafficEncryptionProps(
                external_tls=RenderQueueExternalTLSProps(
                    rfdk_certificate=server_cert,
                ),
                internal_protocol=ApplicationProtocol.HTTPS,
            ),
        )

        if props.create_resource_tracker_role:
            # Creates the Resource Tracker Access role. This role is required to exist in your account so the resource tracker will work properly
            Role(
                self,
                'ResourceTrackerRole',
                assumed_by=ServicePrincipal('lambda.amazonaws.com'),
                managed_policies=[ManagedPolicy.from_aws_managed_policy_name('AWSThinkboxDeadlineResourceTrackerAccessPolicy')],
                role_name='DeadlineResourceTrackerAccessRole',
            )

        fleet = SpotEventPluginFleet(
            self,
            'SpotEventPluginFleet',
            vpc=vpc,
            render_queue=render_queue,
            deadline_groups=['group_name'],
            instance_types=[InstanceType.of(InstanceClass.BURSTABLE3, InstanceSize.LARGE)],
            worker_machine_image=props.worker_machine_image,
            max_capacity=1,
        )

        # Optional: Add additional tags to both spot fleet request and spot instances.
        Tags.of(fleet).add('name', 'SEPtest')

        ConfigureSpotEventPlugin(
            self,
            'ConfigureSpotEventPlugin',
            vpc=vpc,
            render_queue=render_queue,
            spot_fleets=[fleet],
            configuration=SpotEventPluginSettings(
                enable_resource_tracker=True,
            ),
        )
Example #25
    def __init__(
            self,
            scope: Stack,
            id: str,
            on_create_action: Dict[str, Any],
            on_update_action: Dict[str, Any],
            on_delete_action: Dict[str, Any],
    ) -> None:
        """
        Constructor.

        :param scope: CloudFormation stack in which resources should be placed.
        :param id: Name (id) or prefix for resources.
        :param on_create_action: Create action arguments. Read more on:
        https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.create_service
        :param on_update_action: Update action arguments. Read more on:
        https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.update_service
        :param on_delete_action: Delete action arguments. Read more on:
        https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.delete_service
        """
        self.__role = Role(
            scope=scope,
            id=id + 'Role',
            role_name=id + 'Role',
            assumed_by=CompositePrincipal(
                ServicePrincipal("lambda.amazonaws.com"),
                ServicePrincipal("cloudformation.amazonaws.com")
            ),
            inline_policies={
                id + 'Policy': PolicyDocument(
                    statements=[
                        PolicyStatement(
                            actions=[
                                'ecs:createService',
                                'ecs:updateService',
                                'ecs:deleteService',
                                'ecs:describeServices',
                                'ecs:listServices',
                                'ecs:updateServicePrimaryTaskSet'
                            ],
                            effect=Effect.ALLOW,
                            resources=['*']
                        ),
                        PolicyStatement(
                            actions=[
                                "logs:CreateLogGroup",
                                "logs:CreateLogStream",
                                "logs:PutLogEvents"
                            ],
                            effect=Effect.ALLOW,
                            resources=['*']
                        ),
                    ]
                )
            },
            managed_policies=[]
        )

        self.__custom_backend = Function(
            scope=scope,
            id=id + 'Backend',
            code=Code.from_asset(
                path=package_root
            ),
            handler='index.handler',
            runtime=Runtime.PYTHON_3_6,
            description=f'A custom resource backend to manage ecs {id} service.',
            function_name=id + 'Backend',
            memory_size=128,
            role=self.__role,
            timeout=Duration.seconds(900),
        )

        # noinspection PyTypeChecker
        provider: ICustomResourceProvider = CustomResourceProvider.from_lambda(self.__custom_backend)

        self.__custom_resource = CustomResource(
            scope=scope,
            id=id + 'CustomResource',
            provider=provider,
            removal_policy=RemovalPolicy.DESTROY,
            properties={
                'onCreate': on_create_action,
                'onUpdate': on_update_action,
                'onDelete': on_delete_action
            },
            resource_type='Custom::EcsService'
        )

        # Make sure that custom resource is deleted before lambda function backend.
        self.__custom_resource.node.add_dependency(self.__custom_backend)
Example #26
    def __init__(self, scope: core.Construct, id: str, prefix: str,
                 source_bucket: s3.Bucket, dest_bucket: s3.Bucket,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        suffix = Fn.select(
            4, Fn.split('-', Fn.select(2, Fn.split('/', self.stack_id))))

        # KMS key for Kinesis Data Streams
        self.__kms_key = Key(scope=self,
                             id='kms-kinesis',
                             alias='custom/kinesis',
                             description='KMS key for Kinesis Data Streams',
                             enable_key_rotation=True)

        # Create Kinesis streams
        self.__sale_stream = Stream(scope=self,
                                    id="saleStream",
                                    stream_name="ara-web-sale",
                                    encryption_key=self.__kms_key)
        self.__address_stream = Stream(scope=self,
                                       id="addressStream",
                                       stream_name="ara-web-customer-address",
                                       encryption_key=self.__kms_key)
        self.__customer_stream = Stream(scope=self,
                                        id="customerStream",
                                        stream_name="ara-web-customer",
                                        encryption_key=self.__kms_key)

        # Role for the KDA service
        kda_role = Role(scope=self,
                        id='KinesisAnalyticsRole',
                        assumed_by=ServicePrincipal(
                            service='kinesisanalytics.amazonaws.com'))

        # Grant read on Kinesis streams
        self.__customer_stream.grant_read(kda_role)
        self.__address_stream.grant_read(kda_role)
        self.__sale_stream.grant_read(kda_role)

        # Grant read on source bucket (reference data)
        source_bucket.grant_read(kda_role)
        # Grant write on destination bucket
        dest_bucket.grant_write(kda_role)

        kda_role.add_to_policy(
            PolicyStatement(actions=['kinesis:ListShards'],
                            resources=[
                                self.__customer_stream.stream_arn,
                                self.__address_stream.stream_arn,
                                self.__sale_stream.stream_arn
                            ]))

        # Create Elasticsearch domain
        # TODO: use VPC subnets
        es_domain = EsDomain(scope=self,
                             id='EsDomain',
                             application_prefix=prefix,
                             suffix=suffix,
                             kda_role=kda_role)

        # Create the KDA application after the Elasticsearch service
        kda_app = KdaApplication(scope=self,
                                 id='KdaApplication',
                                 es_domain=es_domain.es_domain,
                                 kda_role=kda_role,
                                 source_bucket=source_bucket,
                                 dest_bucket=dest_bucket)

        core.Tags.of(self).add('module-name', 'streaming')
Example #27
    def __init__(self, scope, id, **kwargs):

        super().__init__(scope, id, **kwargs)

        # Each publisher instance authenticates as an individual user stored in this pool
        user_pool = UserPool(self,
                             'user-pool',
                             user_pool_name='amazon-cloudwatch-publisher')

        # Set up the client app with simple username/password authentication
        user_pool_client = UserPoolClient(
            self,
            'user-pool-client',
            user_pool=user_pool,
            enabled_auth_flows=[AuthFlow.USER_PASSWORD],
            user_pool_client_name='amazon-cloudwatch-publisher')

        # The identity pool exists to associate users to roles they can assume
        identity_pool = CfnIdentityPool(
            self,
            'identity-pool',
            identity_pool_name='amazon-cloudwatch-publisher',
            allow_unauthenticated_identities=False)

        # Setting this property links the identity pool to the user pool client app
        identity_pool.add_property_override(
            property_path='CognitoIdentityProviders',
            value=[{
                'ClientId':
                user_pool_client.user_pool_client_id,
                'ProviderName':
                'cognito-idp.{0}.amazonaws.com/{1}'.format(
                    self.region, user_pool.user_pool_id)
            }])

        # Only identities that come from Cognito users should be able to assume the publisher role
        principal = FederatedPrincipal(
            federated='cognito-identity.amazonaws.com',
            assume_role_action='sts:AssumeRoleWithWebIdentity',
            conditions={
                'StringEquals': {
                    'cognito-identity.amazonaws.com:aud': identity_pool.ref
                },
                'ForAnyValue:StringLike': {
                    'cognito-identity.amazonaws.com:amr': 'authenticated'
                }
            })

        # Minimum set of permissions required to push metrics and logs
        policy = PolicyDocument(statements=[
            PolicyStatement(effect=Effect.ALLOW,
                            actions=[
                                'cloudwatch:PutMetricData',
                                'logs:CreateLogGroup',
                                'logs:CreateLogStream',
                                'logs:DescribeLogGroups',
                                'logs:DescribeLogStreams',
                                'logs:PutLogEvents',
                            ],
                            resources=[
                                '*',
                            ])
        ])

        # Create the role itself using the principal and policy defined above
        role = Role(self,
                    'role',
                    assumed_by=principal,
                    # inline_policies maps policy names to documents (the
                    # name 'publisher-policy' here is arbitrary).
                    inline_policies={'publisher-policy': policy})

        # Associate the above role with the identity pool; we don't want
        # any unauthenticated access so explicitly ensure it's set to None
        CfnIdentityPoolRoleAttachment(
            self,
            'identity-pool-role-attachment-authenticated',
            identity_pool_id=identity_pool.ref,
            roles={
                'authenticated': role.role_arn,
                'unauthenticated': None
            })

        # Defining outputs here allows them to be scraped from the `cdk deploy` command
        CfnOutput(self, 'Region', value=self.region)
        CfnOutput(self, 'AccountId', value=self.account)
        CfnOutput(self, 'UserPoolId', value=user_pool.user_pool_id)
        CfnOutput(self, 'IdentityPoolId', value=identity_pool.ref)
        CfnOutput(self,
                  'AppClientId',
                  value=user_pool_client.user_pool_client_id)
Example #28
    def __init__(self, scope: cdk.Construct, construct_id: str, config, vpc: IVpc, instance: IInstance, neo4j_user_secret: ISecret, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        bucket = Bucket(self, "s3-bucket-altimeter", 
            bucket_name=config["s3_bucket"],
            encryption=BucketEncryption.UNENCRYPTED,  # Encryption is disabled because it is not strictly required here, and S3_MANAGED conflicts with SCP guardrails set by Control Tower on the Audit account.
            block_public_access=BlockPublicAccess.BLOCK_ALL
        )

        cluster = Cluster(self, "ecs-cluster-altimeter", 
            cluster_name="ecsclstr-altimeter--default",
            vpc=vpc               
        )

        task_role = Role(self, "iam-role-altimeter-task-role",
            assumed_by=ServicePrincipal("ecs-tasks.amazonaws.com"),
            # It appears that within the account where the scanner is running, the task role is (partially) used for scanning resources (rather than the altimeter-scanner-access role).      
            managed_policies=[
                ManagedPolicy.from_aws_managed_policy_name('SecurityAudit'),
                ManagedPolicy.from_aws_managed_policy_name('job-function/ViewOnlyAccess')
            ]
        )

        task_definition = FargateTaskDefinition(self, "ecs-fgtd-altimeter",
            task_role=task_role,
            memory_limit_mib=self.MEMORY_LIMIT,
            cpu=self.CPU
        )

        docker_path = os.path.join(os.path.curdir,"..")

        image_asset = DockerImageAsset(self, 'ecr-assets-dia-altimeter', 
            directory=docker_path,
            file="scanner.Dockerfile"
        )            

        task_definition.add_container("ecs-container-altimeter",            
            image= ContainerImage.from_docker_image_asset(image_asset),
            # memory_limit_mib=self.MEMORY_LIMIT,
            # cpu=self.CPU,
            environment= {
                "CONFIG_PATH": config["altimeter_config_path"],
                "S3_BUCKET": config["s3_bucket"]
            },
            logging= AwsLogDriver(
                stream_prefix= 'altimeter',
                log_retention= RetentionDays.TWO_WEEKS
            )
        )

        task_definition.add_to_task_role_policy(PolicyStatement(
            resources=["arn:aws:iam::*:role/"+config["account_execution_role"]],
            actions=['sts:AssumeRole']
        ))
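        # Altimeter scans member accounts by assuming this execution role in each
        # target account, hence the wildcard account id in the ARN.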

        task_definition.add_to_task_role_policy(PolicyStatement(
            resources=[
                "arn:aws:s3:::"+config["s3_bucket"],
                "arn:aws:s3:::"+config["s3_bucket"]+"/*"
            ],
            actions=["s3:GetObject*",
                "s3:GetBucket*",
                "s3:List*",
                "s3:DeleteObject*",
                "s3:PutObject",
                "s3:Abort*",
                "s3:PutObjectTagging"]
        ))

        # Grant the ability to record the stdout to CloudWatch Logs
        # TODO: Refine
        task_definition.add_to_task_role_policy(PolicyStatement(
            resources=["*"],
            actions=['logs:*']
        ))
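        # A tighter policy for the TODO above might look roughly like this sketch
        # (the exact log group name depends on how AwsLogDriver names its group):
        # task_definition.add_to_task_role_policy(PolicyStatement(
        #     resources=["arn:aws:logs:*:*:log-group:/ecs/altimeter*"],
        #     actions=['logs:CreateLogStream', 'logs:PutLogEvents']
        # ))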

        # Trigger task every 24 hours
        Rule(self, "events-rule-altimeter-daily-scan",
            rule_name="evrule--altimeter-daily-scan",
            schedule=Schedule.cron(hour="0", minute="0"),
            description="Daily altimeter scan",
            targets=[EcsTask(
                task_definition=task_definition,
                cluster=cluster,
                subnet_selection=SubnetSelection(subnet_type=SubnetType.PRIVATE)
            )]
        )

        # Trigger task manually via event
        Rule(self, "events-rule-altimeter-manual-scan",
            rule_name="evrule--altimeter-manual-scan",
            event_pattern=EventPattern(source=['altimeter']), 
            description="Manual altimeter scan",
            targets=[EcsTask(
                task_definition=task_definition,
                cluster=cluster,
                subnet_selection=SubnetSelection(subnet_type=SubnetType.PRIVATE)
            )]
        )        


        # Don't put the Neo4j Importer lambda in a separate stack: that would cause
        # a circular reference with the S3 event source, and using an imported
        # bucket as an event source is not possible (you need a Bucket, not an
        # IBucket).
        neo4j_importer_function = PythonFunction(self, 'lambda-function-neo4j-importer',
            function_name="function-altimeter--neo4j-importer",             
            entry="../neo4j-importer",
            index="app.py",
            handler="lambda_handler",
            runtime=Runtime.PYTHON_3_8,
            memory_size=256,
            timeout=cdk.Duration.seconds(60),
            vpc=vpc,
            vpc_subnets=SubnetSelection(subnets=vpc.select_subnets(subnet_group_name='Private').subnets),
            environment={
                "neo4j_address": instance.instance_private_ip,
                "neo4j_user_secret_name": neo4j_user_secret.secret_name
            }
        )

        neo4j_importer_function.add_event_source(
            S3EventSource(bucket,
                events= [EventType.OBJECT_CREATED, EventType.OBJECT_REMOVED],
                filters= [ { "prefix": "raw/", "suffix": ".rdf"}]
            )
        )

        # Grant the lambda read/write access to the S3 bucket for reading raw RDF,
        # writing prepared RDF, and generating signed URIs
        bucket.grant_read_write(neo4j_importer_function.role)
        # Grant lambda read access to the neo4j user secret
        neo4j_user_secret.grant_read(neo4j_importer_function.role)

    def __init__(self, scope: core.Construct, id: str, application_prefix: str,
                 suffix: str, kda_role: Role, **kwargs):
        super().__init__(scope, id, **kwargs)

        stack = Stack.of(self)
        region = stack.region

        # Create Cognito User Pool
        self.__user_pool = CfnUserPool(
            scope=self,
            id='UserPool',
            admin_create_user_config={'allowAdminCreateUserOnly': True},
            policies={'passwordPolicy': {
                'minimumLength': 8
            }},
            username_attributes=['email'],
            auto_verified_attributes=['email'],
            user_pool_name=application_prefix + '_user_pool')

        # Create a Cognito User Pool Domain using the newly created Cognito User Pool
        CfnUserPoolDomain(scope=self,
                          id='CognitoDomain',
                          domain=application_prefix + '-' + suffix,
                          user_pool_id=self.user_pool.ref)

        # Create Cognito Identity Pool
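        # The provider list starts empty on purpose: when Cognito authentication is
        # enabled on the Elasticsearch domain below, the service registers its own
        # user pool app client with this identity pool.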
        self.__id_pool = CfnIdentityPool(
            scope=self,
            id='IdentityPool',
            allow_unauthenticated_identities=False,
            cognito_identity_providers=[],
            identity_pool_name=application_prefix + '_identity_pool')

        trust_relationship = FederatedPrincipal(
            federated='cognito-identity.amazonaws.com',
            conditions={
                'StringEquals': {
                    'cognito-identity.amazonaws.com:aud': self.id_pool.ref
                },
                'ForAnyValue:StringLike': {
                    'cognito-identity.amazonaws.com:amr': 'authenticated'
                }
            },
            assume_role_action='sts:AssumeRoleWithWebIdentity')
        # IAM role for master user
        master_auth_role = Role(scope=self,
                                id='MasterAuthRole',
                                assumed_by=trust_relationship)
        # Role for authenticated user
        limited_auth_role = Role(scope=self,
                                 id='LimitedAuthRole',
                                 assumed_by=trust_relationship)
        # Attach Role to Identity Pool
        CfnIdentityPoolRoleAttachment(
            scope=self,
            id='userPoolRoleAttachment',
            identity_pool_id=self.id_pool.ref,
            roles={'authenticated': limited_auth_role.role_arn})
        # Create master-user-group
        CfnUserPoolGroup(scope=self,
                         id='AdminsGroup',
                         user_pool_id=self.user_pool.ref,
                         group_name='master-user-group',
                         role_arn=master_auth_role.role_arn)
        # Create limited-user-group
        CfnUserPoolGroup(scope=self,
                         id='UsersGroup',
                         user_pool_id=self.user_pool.ref,
                         group_name='limited-user-group',
                         role_arn=limited_auth_role.role_arn)
        # Role for the Elasticsearch service to access Cognito
        es_role = Role(scope=self,
                       id='EsRole',
                       assumed_by=ServicePrincipal(service='es.amazonaws.com'),
                       managed_policies=[
                           ManagedPolicy.from_aws_managed_policy_name(
                               'AmazonESCognitoAccess')
                       ])

        # Use the following command line to generate the python dependencies layer content
        # pip3 install -t lambda-layer/python/lib/python3.8/site-packages -r lambda/requirements.txt
        # Build the lambda layer assets
        subprocess.call([
            'pip', 'install', '-t',
            'streaming/streaming_cdk/lambda-layer/python/lib/python3.8/site-packages',
            '-r', 'streaming/streaming_cdk/bootstrap-lambda/requirements.txt',
            '--upgrade'
        ])
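        # Note: this pip install runs at synth time on the machine invoking the CDK,
        # populating the layer directory before it is packaged as an asset.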

        requirements_layer = _lambda.LayerVersion(
            scope=self,
            id='PythonRequirementsTemplate',
            code=_lambda.Code.from_asset(
                'streaming/streaming_cdk/lambda-layer'),
            compatible_runtimes=[_lambda.Runtime.PYTHON_3_8])

        # This lambda function will bootstrap the Elasticsearch cluster
        bootstrap_function_name = 'AESBootstrap'
        register_template_lambda = _lambda.Function(
            scope=self,
            id='RegisterTemplate',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset(
                'streaming/streaming_cdk/bootstrap-lambda'),
            handler='es-bootstrap.lambda_handler',
            environment={
                'REGION': region,
                'KDA_ROLE_ARN': kda_role.role_arn,
                'MASTER_ROLE_ARN': master_auth_role.role_arn
            },
            layers=[requirements_layer],
            timeout=Duration.minutes(15),
            function_name=bootstrap_function_name)

        lambda_role = register_template_lambda.role
        lambda_role.add_to_policy(
            PolicyStatement(
                actions=['logs:CreateLogGroup'],
                resources=[stack.format_arn(service='logs', resource='*')]))
        lambda_role.add_to_policy(
            PolicyStatement(
                actions=['logs:CreateLogStream', 'logs:PutLogEvents'],
                resources=[
                    stack.format_arn(service='logs',
                                     resource='log_group',
                                     resource_name='/aws/lambda/' +
                                     bootstrap_function_name + ':*')
                ]))

        # Let the lambda assume the master role so that actions can be executed on the cluster
        # https://aws.amazon.com/premiumsupport/knowledge-center/lambda-function-assume-iam-role/
        lambda_role.add_to_policy(
            PolicyStatement(actions=['sts:AssumeRole'],
                            resources=[master_auth_role.role_arn]))

        master_auth_role.assume_role_policy.add_statements(
            PolicyStatement(actions=['sts:AssumeRole'],
                            principals=[lambda_role]))
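        # Inside the bootstrap lambda, the role swap could look roughly like the
        # following sketch (the real handler lives in bootstrap-lambda/es-bootstrap.py):
        #
        #   import boto3
        #   creds = boto3.client('sts').assume_role(
        #       RoleArn=os.environ['MASTER_ROLE_ARN'],
        #       RoleSessionName='es-bootstrap')['Credentials']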

        # List all the roles that are allowed to access the Elasticsearch cluster.
        roles = [
            ArnPrincipal(limited_auth_role.role_arn),
            ArnPrincipal(master_auth_role.role_arn),
            ArnPrincipal(kda_role.role_arn)
        ]  # The users
        if register_template_lambda and register_template_lambda.role:
            roles.append(ArnPrincipal(
                lambda_role.role_arn))  # The lambda used to bootstrap
        # Create kms key
        kms_key = Key(scope=self,
                      id='kms-es',
                      alias='custom/es',
                      description='KMS key for Elasticsearch domain',
                      enable_key_rotation=True)

        # AES Log Groups
        es_app_log_group = logs.LogGroup(scope=self,
                                         id='EsAppLogGroup',
                                         retention=logs.RetentionDays.ONE_WEEK,
                                         removal_policy=RemovalPolicy.RETAIN)

        # Create the Elasticsearch domain
        es_domain_arn = stack.format_arn(service='es',
                                         resource='domain',
                                         resource_name=application_prefix +
                                         '/*')

        es_access_policy = PolicyDocument(statements=[
            PolicyStatement(principals=roles,
                            actions=[
                                'es:ESHttpGet', 'es:ESHttpPut',
                                'es:ESHttpPost', 'es:ESHttpDelete'
                            ],
                            resources=[es_domain_arn])
        ])
        self.__es_domain = es.CfnDomain(
            scope=self,
            id='searchDomain',
            elasticsearch_cluster_config={
                'instanceType': 'r5.large.elasticsearch',
                'instanceCount': 2,
                'dedicatedMasterEnabled': True,
                'dedicatedMasterCount': 3,
                'dedicatedMasterType': 'r5.large.elasticsearch',
                'zoneAwarenessEnabled': True,
                'zoneAwarenessConfig': {
                    'availabilityZoneCount': 2
                },
            },
            encryption_at_rest_options={
                'enabled': True,
                'kmsKeyId': kms_key.key_id
            },
            node_to_node_encryption_options={'enabled': True},
            ebs_options={
                'volumeSize': 10,
                'ebsEnabled': True
            },
            elasticsearch_version='7.9',
            domain_name=application_prefix,
            access_policies=es_access_policy,
            cognito_options={
                'enabled': True,
                'identityPoolId': self.id_pool.ref,
                'roleArn': es_role.role_arn,
                'userPoolId': self.user_pool.ref
            },
            advanced_security_options={
                'enabled': True,
                'internalUserDatabaseEnabled': False,
                'masterUserOptions': {
                    'masterUserArn': master_auth_role.role_arn
                }
            },
            domain_endpoint_options={
                'enforceHttps': True,
                'tlsSecurityPolicy': 'Policy-Min-TLS-1-2-2019-07'
            },
            # log_publishing_options={
            #     'ES_APPLICATION_LOGS': {
            #         'enabled': True,
            #         'cloud_watch_logs_log_group_arn': es_app_log_group.log_group_arn
            #     },
            #     'AUDIT_LOGS': {
            #         'enabled': True,
            #         'cloud_watch_logs_log_group_arn': ''
            #     },
            #     'SEARCH_SLOW_LOGS': {
            #         'enabled': True,
            #         'cloud_watch_logs_log_group_arn': ''
            #     },
            #     'INDEX_SLOW_LOGS': {
            #         'enabled': True,
            #         'cloud_watch_logs_log_group_arn': ''
            #     }
            # }
        )
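        # Note: advanced (fine-grained) security requires encryption at rest,
        # node-to-node encryption and enforced HTTPS, all of which are enabled above.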

        # Not yet on the roadmap...
        # See https://github.com/aws-cloudformation/aws-cloudformation-coverage-roadmap/issues/283
        # self.es_domain.add_property_override('ElasticsearchClusterConfig.WarmEnabled', True)
        # self.es_domain.add_property_override('ElasticsearchClusterConfig.WarmCount', 2)
        # self.es_domain.add_property_override('ElasticsearchClusterConfig.WarmType', 'ultrawarm1.large.elasticsearch')

        # Deny all roles from the authentication provider - users must be added to
        # groups to receive a role. This lambda function adjusts the identity
        # pool's role mappings accordingly.
        cognito_function_name = 'CognitoFix'
        cognito_template_lambda = _lambda.Function(
            scope=self,
            id='CognitoFixLambda',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset(
                'streaming/streaming_cdk/cognito-lambda'),
            handler='handler.handler',
            environment={
                'REGION': region,
                'USER_POOL_ID': self.__user_pool.ref,
                'IDENTITY_POOL_ID': self.__id_pool.ref,
                'LIMITED_ROLE_ARN': limited_auth_role.role_arn
            },
            timeout=Duration.minutes(15),
            function_name=cognito_function_name)

        lambda_role = cognito_template_lambda.role
        lambda_role.add_to_policy(
            PolicyStatement(
                actions=['logs:CreateLogGroup'],
                resources=[stack.format_arn(service='logs', resource='*')]))
        lambda_role.add_to_policy(
            PolicyStatement(
                actions=['logs:CreateLogStream', 'logs:PutLogEvents'],
                resources=[
                    stack.format_arn(service='logs',
                                     resource='log_group',
                                     resource_name='/aws/lambda/' +
                                     cognito_function_name + ':*')
                ]))
        lambda_role.add_to_policy(
            PolicyStatement(actions=['cognito-idp:ListUserPoolClients'],
                            resources=[self.user_pool.attr_arn]))
        lambda_role.add_to_policy(
            PolicyStatement(actions=['iam:PassRole'],
                            resources=[limited_auth_role.role_arn]))
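        # SetIdentityPoolRoles requires iam:PassRole on any role being attached,
        # hence the statement above.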

        cognito_id_res = Fn.join(':', [
            'arn:aws:cognito-identity', stack.region, stack.account,
            Fn.join('/', ['identitypool', self.__id_pool.ref])
        ])

        lambda_role.add_to_policy(
            PolicyStatement(actions=['cognito-identity:SetIdentityPoolRoles'],
                            resources=[cognito_id_res]))

        # Get the Domain Endpoint and register it with the lambda as environment variable.
        register_template_lambda.add_environment(
            'DOMAIN', self.__es_domain.attr_domain_endpoint)

        CfnOutput(scope=self,
                  id='createUserUrl',
                  description="Create a new user in the user pool here.",
                  value="https://" + scope.region +
                  ".console.aws.amazon.com/cognito/users?region=" +
                  scope.region + "#/pool/" + self.user_pool.ref + "/users")
        CfnOutput(scope=self,
                  id='kibanaUrl',
                  description="Access Kibana via this URL.",
                  value="https://" + self.__es_domain.attr_domain_endpoint +
                  "/_plugin/kibana/")

        bootstrap_lambda_provider = Provider(
            scope=self,
            id='BootstrapLambdaProvider',
            on_event_handler=register_template_lambda)
        CustomResource(scope=self,
                       id='ExecuteRegisterTemplate',
                       service_token=bootstrap_lambda_provider.service_token,
                       properties={'Timeout': 900})

        cognito_lambda_provider = Provider(
            scope=self,
            id='CognitoFixLambdaProvider',
            on_event_handler=cognito_template_lambda)
        cognito_fix_resource = CustomResource(
            scope=self,
            id='ExecuteCognitoFix',
            service_token=cognito_lambda_provider.service_token)
        cognito_fix_resource.node.add_dependency(self.__es_domain)
Ejemplo n.º 30
0
    def __init__(
            self,
            stack: core.Stack,
            prefix: str,
            code_repository: Repository,
            task_definition: str,
            app_spec: str,
            ecs_application: EcsApplication,
            main_listener: CfnListener,
            deployments_listener: CfnListener,
            production_target_group,
            deployment_target_group,
            ecs_cluster: Cluster,
    ) -> None:
        """
        Constructor.

        :param stack: A CloudFormation stack to which add this resource.
        :param prefix: Prefix for resource names.
        :param code_repository: A codecommit git repository to push configuration files for ecs deployment.
        :param task_definition: A document which describes the task (containers and resources) that ecs should run.
        :param app_spec: A document which describes how the ecs deployment should behave.
        :param ecs_application: An ecs application for which the deployments are being made.
        :param main_listener: A loadbalancer's main listener for main traffic.
        :param deployments_listener: A loadbalancer's testing listener for testing traffic.
        :param production_target_group: The target group that receives production traffic.
        :param deployment_target_group: The target group that receives test traffic during a deployment.
        :param ecs_cluster: An ecs cluster in which our ecs application is located.
        """
        self.__stack = stack
        self.__prefix = prefix
        self.__code_repository = code_repository
        self.__task_definition = task_definition
        self.__app_spec = app_spec
        self.__ecs_application = ecs_application
        self.__main_listener = main_listener
        self.__deployments_listener = deployments_listener
        self.__production_target_group = production_target_group
        self.__deployment_target_group = deployment_target_group
        self.__ecs_cluster = ecs_cluster

        self.__custom_resource_role = Role(
            self.__stack,
            self.__prefix + 'CustomFargateDeploymentGroupRole',
            inline_policies={
                self.__prefix + 'CustomFargateDeploymentGroupPolicy': PolicyDocument(
                    statements=[
                        PolicyStatement(
                            actions=[
                                "codedeploy:GetDeploymentGroup",
                                "codedeploy:CreateDeploymentGroup",
                                "codedeploy:DeleteDeploymentGroup",
                                "codedeploy:UpdateDeploymentGroup",
                            ],
                            resources=['*'],
                            effect=Effect.ALLOW
                        ),
                        PolicyStatement(
                            actions=[
                                "logs:CreateLogGroup",
                                "logs:CreateLogStream",
                                "logs:PutLogEvents"
                            ],
                            resources=['*'],
                            effect=Effect.ALLOW
                        )
                    ]
                )},
            assumed_by=ServicePrincipal('lambda.amazonaws.com')
        )

        self.__deployment_group_role = Role(
            self.__stack,
            self.__prefix + 'FargateDeploymentGroupRole',
            path='/',
            inline_policies={
                self.__prefix + 'FargateDeploymentGroupPolicy': PolicyDocument(
                    statements=[
                        PolicyStatement(
                            actions=[
                                "ecs:DescribeServices",
                                "ecs:CreateTaskSet",
                                "ecs:UpdateServicePrimaryTaskSet",
                                "ecs:DeleteTaskSet",
                                "elasticloadbalancing:DescribeTargetGroups",
                                "elasticloadbalancing:DescribeListeners",
                                "elasticloadbalancing:ModifyListener",
                                "elasticloadbalancing:DescribeRules",
                                "elasticloadbalancing:ModifyRule",
                                "lambda:InvokeFunction",
                                "cloudwatch:DescribeAlarms",
                                "sns:Publish",
                                "s3:GetObject",
                                "s3:GetObjectMetadata",
                                "s3:GetObjectVersion",
                                "iam:PassRole"
                            ],
                            resources=['*'],
                            effect=Effect.ALLOW
                        )
                    ]
                )
            },
            assumed_by=CompositePrincipal(
                ServicePrincipal('ecs-tasks.amazonaws.com'),
                ServicePrincipal('codedeploy.amazonaws.com')
            )
        )