Example #1
    def __init__(self, scope: Construct, id: str, name: str) -> None:
        super().__init__(scope, id)

        self._key = Key(self, f"kms_key_{name}")
        self._key.add_alias(f"alias/kms-{name}")
        self._key.add_to_resource_policy(
            PolicyStatement(effect=Effect.ALLOW,
                            actions=["kms:*"],
                            principals=[AnyPrincipal()],
                            resources=["*"]))
Example #2
    def __init__(self, scope: Construct, id: str, envs: EnvSettings):
        super().__init__(scope, id)

        self.key = Key(self, id="Key", alias=f"alias/{envs.project_name}")

        self.key.add_to_resource_policy(
            PolicyStatement(actions=["kms:Encrypt", "kms:Decrypt"],
                            principals=[AccountRootPrincipal()],
                            resources=["*"]))

        CfnOutput(
            self,
            "KmsKeyArnOutput",
            export_name=self.get_kms_arn_output_export_name(envs),
            value=self.key.key_arn,
        )
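
The CfnOutput above exports the key ARN under the name returned by get_kms_arn_output_export_name (not shown here). A minimal sketch of how another stack might consume that export, assuming CDK v1 imports; the class name KeyConsumerStack and the export-name string are placeholders:

from aws_cdk.core import Construct, Fn, Stack
from aws_cdk.aws_kms import Key


class KeyConsumerStack(Stack):
    def __init__(self, scope: Construct, id: str) -> None:
        super().__init__(scope, id)

        # Look up the exported ARN and wrap it as an IKey for use in this stack.
        # "my-project-kms-key-arn" stands in for the actual export name.
        imported_key_arn = Fn.import_value("my-project-kms-key-arn")
        self.imported_key = Key.from_key_arn(self, "ImportedKey",
                                             imported_key_arn)
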
Example #3
    def provision_buckets(self, name: str, s3: S3):
        self.buckets = {}
        for bucket, attrs in s3.buckets.items():
            use_sse_kms_key = False
            sse_kms_key = None
            if attrs.sse_kms_key_id:
                use_sse_kms_key = True
                sse_kms_key = Key.from_key_arn(self, f"{bucket}-kms-key",
                                               attrs.sse_kms_key_id)

            self.buckets[bucket] = Bucket(
                self.scope,
                bucket,
                bucket_name=f"{name}-{bucket}",
                auto_delete_objects=attrs.auto_delete_objects
                and attrs.removal_policy_destroy,
                removal_policy=cdk.RemovalPolicy.DESTROY
                if attrs.removal_policy_destroy else cdk.RemovalPolicy.RETAIN,
                enforce_ssl=True,
                bucket_key_enabled=use_sse_kms_key,
                encryption_key=(sse_kms_key if use_sse_kms_key else None),
                encryption=(BucketEncryption.KMS if use_sse_kms_key else
                            BucketEncryption.S3_MANAGED),
            )
            self.buckets[bucket].add_to_resource_policy(
                iam.PolicyStatement(
                    sid="DenyIncorrectEncryptionHeader",
                    effect=iam.Effect.DENY,
                    principals=[iam.ArnPrincipal("*")],
                    actions=[
                        "s3:PutObject",
                    ],
                    resources=[f"{self.buckets[bucket].bucket_arn}/*"],
                    conditions={
                        "StringNotEquals": {
                            "s3:x-amz-server-side-encryption":
                            "aws:kms" if use_sse_kms_key else "AES256"
                        }
                    },
                ))
            self.buckets[bucket].add_to_resource_policy(
                iam.PolicyStatement(
                    sid="DenyUnEncryptedObjectUploads",
                    effect=iam.Effect.DENY,
                    principals=[iam.ArnPrincipal("*")],
                    actions=[
                        "s3:PutObject",
                    ],
                    resources=[f"{self.buckets[bucket].bucket_arn}/*"],
                    conditions={
                        "Null": {
                            "s3:x-amz-server-side-encryption": "true"
                        }
                    },
                ))
            self.s3_api_statement.add_resources(
                f"{self.buckets[bucket].bucket_arn}*")
            cdk.CfnOutput(self.scope,
                          f"{bucket}-output",
                          value=self.buckets[bucket].bucket_name)
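
The two deny statements above reject any s3:PutObject request that either omits the x-amz-server-side-encryption header or sets it to the wrong algorithm. A small boto3 sketch of an upload that satisfies both conditions for a KMS-encrypted bucket (bucket name, object key, and KMS key ARN are placeholders; for an S3-managed bucket, ServerSideEncryption would be "AES256" and SSEKMSKeyId would be omitted):

import boto3

s3_client = boto3.client("s3")
# PutObject with the SSE headers the bucket policy requires.
s3_client.put_object(
    Bucket="my-prefix-my-bucket",
    Key="data/report.csv",
    Body=b"example payload",
    ServerSideEncryption="aws:kms",
    SSEKMSKeyId="arn:aws:kms:us-east-1:123456789012:key/00000000-0000-0000-0000-000000000000",
)
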
Example #4
    def set_local_disk_encryption(self, encryption_key: kms.Key, ebs_encryption: bool = True):
        if self._rehydrated:
            raise ReadOnlyEMRProfileError()

        self._local_disk_encryption_configuration = {
            'EncryptionKeyProviderType': 'AwsKms',
            'AwsKmsKey': encryption_key.key_arn
        }
        encryption_key.grant_encrypt_decrypt(self._roles.instance_role)

        if ebs_encryption:
            self._local_disk_encryption_configuration['EnableEbsEncryption'] = True
            encryption_key.grant_encrypt_decrypt(self._roles.service_role)
            encryption_key.grant(self._roles.service_role, 'kms:CreateGrant', 'kms:ListGrants', 'kms:RevokeGrant')

        self._construct_security_configuration()
        return self
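
_construct_security_configuration is not shown here, but the dictionary built above maps onto the LocalDiskEncryptionConfiguration block of an EMR security configuration. A sketch of where that block sits in the JSON accepted by EMR's CreateSecurityConfiguration API (key names per the EMR documentation; the key ARN is a placeholder):

# Sketch: overall shape of an EMR security configuration embedding the
# local-disk settings produced by set_local_disk_encryption.
security_configuration = {
    "EncryptionConfiguration": {
        "EnableInTransitEncryption": False,
        "EnableAtRestEncryption": True,
        "AtRestEncryptionConfiguration": {
            "LocalDiskEncryptionConfiguration": {
                "EncryptionKeyProviderType": "AwsKms",
                "AwsKmsKey": "arn:aws:kms:us-east-1:123456789012:key/00000000-0000-0000-0000-000000000000",
                "EnableEbsEncryption": True,
            }
        },
    }
}
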
Example #5
    def authorize_output_key(self, key: kms.Key):
        if self._rehydrated and not self._mutable_instance_role:
            raise ReadOnlyEMRProfileError()

        key.grant_encrypt(self._roles.instance_role).assert_success()
        return self
Example #6
    def __init__(self, scope: core.Construct, id: str, application_prefix: str,
                 suffix: str, kda_role: Role, **kwargs):
        super().__init__(scope, id, **kwargs)

        stack = Stack.of(self)
        region = stack.region

        # Create Cognito User Pool
        self.__user_pool = CfnUserPool(
            scope=self,
            id='UserPool',
            admin_create_user_config={'allowAdminCreateUserOnly': True},
            policies={'passwordPolicy': {
                'minimumLength': 8
            }},
            username_attributes=['email'],
            auto_verified_attributes=['email'],
            user_pool_name=application_prefix + '_user_pool')
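        # Note: self.user_pool and self.id_pool, used below, are assumed to be
        # read-only properties defined elsewhere in this class that expose the
        # private __user_pool and __id_pool attributes.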

        # Create a Cognito User Pool Domain using the newly created Cognito User Pool
        CfnUserPoolDomain(scope=self,
                          id='CognitoDomain',
                          domain=application_prefix + '-' + suffix,
                          user_pool_id=self.user_pool.ref)

        # Create Cognito Identity Pool
        self.__id_pool = CfnIdentityPool(
            scope=self,
            id='IdentityPool',
            allow_unauthenticated_identities=False,
            cognito_identity_providers=[],
            identity_pool_name=application_prefix + '_identity_pool')

        trust_relationship = FederatedPrincipal(
            federated='cognito-identity.amazonaws.com',
            conditions={
                'StringEquals': {
                    'cognito-identity.amazonaws.com:aud': self.id_pool.ref
                },
                'ForAnyValue:StringLike': {
                    'cognito-identity.amazonaws.com:amr': 'authenticated'
                }
            },
            assume_role_action='sts:AssumeRoleWithWebIdentity')
        # IAM role for master user
        master_auth_role = Role(scope=self,
                                id='MasterAuthRole',
                                assumed_by=trust_relationship)
        # Role for authenticated user
        limited_auth_role = Role(scope=self,
                                 id='LimitedAuthRole',
                                 assumed_by=trust_relationship)
        # Attach Role to Identity Pool
        CfnIdentityPoolRoleAttachment(
            scope=self,
            id='userPoolRoleAttachment',
            identity_pool_id=self.id_pool.ref,
            roles={'authenticated': limited_auth_role.role_arn})
        # Create master-user-group
        CfnUserPoolGroup(scope=self,
                         id='AdminsGroup',
                         user_pool_id=self.user_pool.ref,
                         group_name='master-user-group',
                         role_arn=master_auth_role.role_arn)
        # Create limited-user-group
        CfnUserPoolGroup(scope=self,
                         id='UsersGroup',
                         user_pool_id=self.user_pool.ref,
                         group_name='limited-user-group',
                         role_arn=limited_auth_role.role_arn)
        # Role for the Elasticsearch service to access Cognito
        es_role = Role(scope=self,
                       id='EsRole',
                       assumed_by=ServicePrincipal(service='es.amazonaws.com'),
                       managed_policies=[
                           ManagedPolicy.from_aws_managed_policy_name(
                               'AmazonESCognitoAccess')
                       ])

        # Use the following command line to generate the python dependencies layer content
        # pip3 install -t lambda-layer/python/lib/python3.8/site-packages -r lambda/requirements.txt
        # Build the lambda layer assets
        subprocess.call([
            'pip', 'install', '-t',
            'streaming/streaming_cdk/lambda-layer/python/lib/python3.8/site-packages',
            '-r', 'streaming/streaming_cdk/bootstrap-lambda/requirements.txt',
            '--upgrade'
        ])

        requirements_layer = _lambda.LayerVersion(
            scope=self,
            id='PythonRequirementsTemplate',
            code=_lambda.Code.from_asset(
                'streaming/streaming_cdk/lambda-layer'),
            compatible_runtimes=[_lambda.Runtime.PYTHON_3_8])

        # This lambda function will bootstrap the Elasticsearch cluster
        bootstrap_function_name = 'AESBootstrap'
        register_template_lambda = _lambda.Function(
            scope=self,
            id='RegisterTemplate',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset(
                'streaming/streaming_cdk/bootstrap-lambda'),
            handler='es-bootstrap.lambda_handler',
            environment={
                'REGION': region,
                'KDA_ROLE_ARN': kda_role.role_arn,
                'MASTER_ROLE_ARN': master_auth_role.role_arn
            },
            layers=[requirements_layer],
            timeout=Duration.minutes(15),
            function_name=bootstrap_function_name)

        lambda_role = register_template_lambda.role
        lambda_role.add_to_policy(
            PolicyStatement(
                actions=['logs:CreateLogGroup'],
                resources=[stack.format_arn(service='logs', resource='*')]))
        lambda_role.add_to_policy(
            PolicyStatement(
                actions=['logs:CreateLogStream', 'logs:PutLogEvents'],
                resources=[
                    stack.format_arn(service='logs',
                                     resource='log_group',
                                     resource_name='/aws/lambda/' +
                                     bootstrap_function_name + ':*')
                ]))

        # Let the lambda assume the master role so that actions can be executed on the cluster
        # https://aws.amazon.com/premiumsupport/knowledge-center/lambda-function-assume-iam-role/
        lambda_role.add_to_policy(
            PolicyStatement(actions=['sts:AssumeRole'],
                            resources=[master_auth_role.role_arn]))

        master_auth_role.assume_role_policy.add_statements(
            PolicyStatement(actions=['sts:AssumeRole'],
                            principals=[lambda_role]))

        # List all the roles that are allowed to access the Elasticsearch cluster.
        roles = [
            ArnPrincipal(limited_auth_role.role_arn),
            ArnPrincipal(master_auth_role.role_arn),
            ArnPrincipal(kda_role.role_arn)
        ]  # The users
        if register_template_lambda and register_template_lambda.role:
            roles.append(ArnPrincipal(
                lambda_role.role_arn))  # The lambda used to bootstrap
        # Create kms key
        kms_key = Key(scope=self,
                      id='kms-es',
                      alias='custom/es',
                      description='KMS key for Elasticsearch domain',
                      enable_key_rotation=True)

        # AES Log Groups
        es_app_log_group = logs.LogGroup(scope=self,
                                         id='EsAppLogGroup',
                                         retention=logs.RetentionDays.ONE_WEEK,
                                         removal_policy=RemovalPolicy.RETAIN)

        # Create the Elasticsearch domain
        es_domain_arn = stack.format_arn(service='es',
                                         resource='domain',
                                         resource_name=application_prefix +
                                         '/*')

        es_access_policy = PolicyDocument(statements=[
            PolicyStatement(principals=roles,
                            actions=[
                                'es:ESHttpGet', 'es:ESHttpPut',
                                'es:ESHttpPost', 'es:ESHttpDelete'
                            ],
                            resources=[es_domain_arn])
        ])
        self.__es_domain = es.CfnDomain(
            scope=self,
            id='searchDomain',
            elasticsearch_cluster_config={
                'instanceType': 'r5.large.elasticsearch',
                'instanceCount': 2,
                'dedicatedMasterEnabled': True,
                'dedicatedMasterCount': 3,
                'dedicatedMasterType': 'r5.large.elasticsearch',
                'zoneAwarenessEnabled': True,
                'zoneAwarenessConfig': {
                    'AvailabilityZoneCount': '2'
                },
            },
            encryption_at_rest_options={
                'enabled': True,
                'kmsKeyId': kms_key.key_id
            },
            node_to_node_encryption_options={'enabled': True},
            ebs_options={
                'volumeSize': 10,
                'ebsEnabled': True
            },
            elasticsearch_version='7.9',
            domain_name=application_prefix,
            access_policies=es_access_policy,
            cognito_options={
                'enabled': True,
                'identityPoolId': self.id_pool.ref,
                'roleArn': es_role.role_arn,
                'userPoolId': self.user_pool.ref
            },
            advanced_security_options={
                'enabled': True,
                'internalUserDatabaseEnabled': False,
                'masterUserOptions': {
                    'masterUserArn': master_auth_role.role_arn
                }
            },
            domain_endpoint_options={
                'enforceHttps': True,
                'tlsSecurityPolicy': 'Policy-Min-TLS-1-2-2019-07'
            },
            # log_publishing_options={
            #     # 'ES_APPLICATION_LOGS': {
            #     #     'enabled': True,
            #     #     'cloud_watch_logs_log_group_arn': es_app_log_group.log_group_arn
            #     # },
            #     # 'AUDIT_LOGS': {
            #     #     'enabled': True,
            #     #     'cloud_watch_logs_log_group_arn': ''
            #     # },
            #     # 'SEARCH_SLOW_LOGS': {
            #     #     'enabled': True,
            #     #     'cloud_watch_logs_log_group_arn': ''
            #     # },
            #     # 'INDEX_SLOW_LOGS': {
            #     #     'enabled': True,
            #     #     'cloud_watch_logs_log_group_arn': ''
            #     # }
            # }
        )

        # Not yet on the roadmap...
        # See https://github.com/aws-cloudformation/aws-cloudformation-coverage-roadmap/issues/283
        # self.es_domain.add_property_override('ElasticsearchClusterConfig.WarmEnabled', True)
        # self.es_domain.add_property_override('ElasticsearchClusterConfig.WarmCount', 2)
        # self.es_domain.add_property_override('ElasticsearchClusterConfig.WarmType', 'ultrawarm1.large.elasticsearch')

        # Deny all roles from the authentication provider - users must be added to groups
        # This lambda function updates the Cognito identity pool role mapping accordingly
        cognito_function_name = 'CognitoFix'
        cognito_template_lambda = _lambda.Function(
            scope=self,
            id='CognitoFixLambda',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset(
                'streaming/streaming_cdk/cognito-lambda'),
            handler='handler.handler',
            environment={
                'REGION': region,
                'USER_POOL_ID': self.__user_pool.ref,
                'IDENTITY_POOL_ID': self.__id_pool.ref,
                'LIMITED_ROLE_ARN': limited_auth_role.role_arn
            },
            timeout=Duration.minutes(15),
            function_name=cognito_function_name)

        lambda_role = cognito_template_lambda.role
        lambda_role.add_to_policy(
            PolicyStatement(
                actions=['logs:CreateLogGroup'],
                resources=[stack.format_arn(service='logs', resource='*')]))
        lambda_role.add_to_policy(
            PolicyStatement(
                actions=['logs:CreateLogStream', 'logs:PutLogEvents'],
                resources=[
                    stack.format_arn(service='logs',
                                     resource='log_group',
                                     resource_name='/aws/lambda/' +
                                     cognito_function_name + ':*')
                ]))
        lambda_role.add_to_policy(
            PolicyStatement(actions=['cognito-idp:ListUserPoolClients'],
                            resources=[self.user_pool.attr_arn]))
        lambda_role.add_to_policy(
            PolicyStatement(actions=['iam:PassRole'],
                            resources=[limited_auth_role.role_arn]))

        cognito_id_res = Fn.join(':', [
            'arn:aws:cognito-identity', stack.region, stack.account,
            Fn.join('/', ['identitypool', self.__id_pool.ref])
        ])

        lambda_role.add_to_policy(
            PolicyStatement(actions=['cognito-identity:SetIdentityPoolRoles'],
                            resources=[cognito_id_res]))

        # Get the domain endpoint and register it with the lambda as an environment variable.
        register_template_lambda.add_environment(
            'DOMAIN', self.__es_domain.attr_domain_endpoint)

        CfnOutput(scope=self,
                  id='createUserUrl',
                  description="Create a new user in the user pool here.",
                  value="https://" + scope.region +
                  ".console.aws.amazon.com/cognito/users?region=" +
                  scope.region + "#/pool/" + self.user_pool.ref + "/users")
        CfnOutput(scope=self,
                  id='kibanaUrl',
                  description="Access Kibana via this URL.",
                  value="https://" + self.__es_domain.attr_domain_endpoint +
                  "/_plugin/kibana/")

        bootstrap_lambda_provider = Provider(
            scope=self,
            id='BootstrapLambdaProvider',
            on_event_handler=register_template_lambda)
        CustomResource(scope=self,
                       id='ExecuteRegisterTemplate',
                       service_token=bootstrap_lambda_provider.service_token,
                       properties={'Timeout': 900})

        cognito_lambda_provider = Provider(
            scope=self,
            id='CognitoFixLambdaProvider',
            on_event_handler=cognito_template_lambda)
        cognito_fix_resource = CustomResource(
            scope=self,
            id='ExecuteCognitoFix',
            service_token=cognito_lambda_provider.service_token)
        cognito_fix_resource.node.add_dependency(self.__es_domain)
Example #7
    def __init__(self, scope: core.Construct, id: str, prefix: str,
                 source_bucket: s3.Bucket, dest_bucket: s3.Bucket,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        suffix = Fn.select(
            4, Fn.split('-', Fn.select(2, Fn.split('/', self.stack_id))))
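        # The stack ID has the form
        # arn:aws:cloudformation:<region>:<account>:stack/<stack-name>/<guid>,
        # so the expression above selects the <guid> portion (third '/' token)
        # and keeps its last '-'-separated segment as a short unique suffix.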

        # KMS key for Kinesis Data Streams
        self.__kms_key = Key(scope=self,
                             id='kms-kinesis',
                             alias='custom/kinesis',
                             description='KMS key for Kinesis Data Streams',
                             enable_key_rotation=True)

        # Create Kinesis streams
        self.__sale_stream = Stream(scope=self,
                                    id="saleStream",
                                    stream_name="ara-web-sale",
                                    encryption_key=self.__kms_key)
        self.__address_stream = Stream(scope=self,
                                       id="addressStream",
                                       stream_name="ara-web-customer-address",
                                       encryption_key=self.__kms_key)
        self.__customer_stream = Stream(scope=self,
                                        id="customerStream",
                                        stream_name="ara-web-customer",
                                        encryption_key=self.__kms_key)

        # Role for the KDA service
        kda_role = Role(scope=self,
                        id='KinesisAnalyticsRole',
                        assumed_by=ServicePrincipal(
                            service='kinesisanalytics.amazonaws.com'))

        # Grant read on Kinesis streams
        self.__customer_stream.grant_read(kda_role)
        self.__address_stream.grant_read(kda_role)
        self.__sale_stream.grant_read(kda_role)

        # Grant read on source bucket (reference data)
        source_bucket.grant_read(kda_role)
        # Grant write on destination bucket
        dest_bucket.grant_write(kda_role)

        kda_role.add_to_policy(
            PolicyStatement(actions=['kinesis:ListShards'],
                            resources=[
                                self.__customer_stream.stream_arn,
                                self.__address_stream.stream_arn,
                                self.__sale_stream.stream_arn
                            ]))

        # Create Elasticsearch domain
        # TODO: use VPC subnets
        es_domain = EsDomain(scope=self,
                             id='EsDomain',
                             application_prefix=prefix,
                             suffix=suffix,
                             kda_role=kda_role)

        # Create the KDA application after the Elasticsearch service
        kda_app = KdaApplication(scope=self,
                                 id='KdaApplication',
                                 es_domain=es_domain.es_domain,
                                 kda_role=kda_role,
                                 source_bucket=source_bucket,
                                 dest_bucket=dest_bucket)

        core.Tags.of(self).add('module-name', 'streaming')
Example #8
    def add_low_efs_burst_credit_alarms(self, filesystem: FileSystem,
                                        email_address: str) -> None:
        '''
        Set up CloudWatch Alarms that will warn when the given filesystem's burst credits are below
        four different thresholds. We send an email to the given address when an Alarm breaches.
        '''
        # Set up the SNS Topic that will send the emails.
        # ====================
        # 1) KMS key to use to encrypt events within the SNS Topic. The Key is optional
        key = Key(
            self,
            'SNSEncryptionKey',
            description=
            'Used to encrypt the SNS Topic for sending EFS Burst Credit alerts',
            enable_key_rotation=True,
            removal_policy=RemovalPolicy.DESTROY,
            trust_account_identities=True)
        key.grant(ServicePrincipal('cloudwatch.amazonaws.com'), 'kms:Decrypt',
                  'kms:GenerateDataKey')

        # 2) SNS Topic that will be alerted by CloudWatch and will send the email in response.
        sns_topic = Topic(self, 'BurstAlertEmailTopic', master_key=key)
        sns_topic.grant_publish(ServicePrincipal('cloudwatch.amazonaws.com'))
        sns_topic.add_subscription(EmailSubscription(email_address))
        alarm_action = SnsAction(sns_topic)

        # Set up the CloudWatch Alarm(s) and have them trigger SNS events when breached.
        # ======================
        # 1) CDK helper to define the CloudWatch Metric that we're interested in.
        burst_credits_metric = Metric(
            metric_name='BurstCreditBalance',
            namespace='AWS/EFS',
            dimensions={"FileSystemId": filesystem.file_system_id},
            # One 99th-percentile data point sample every hour
            period=Duration.hours(1),
            statistic='p99')

        # 2) Create the alarms
        thresholds = [
            {
                "id": 'CAUTION-EfsBurstCredits',
                "name": f"CAUTION Burst Credits - {filesystem.file_system_id}",
                "threshold": int(2.00 * 2**40),
                "message":
                f"CAUTION. 2 TiB Threshold Breached: EFS {filesystem.file_system_id} is depleting burst credits. Add data to the EFS to increase baseline throughput.",
                # Alarm after 6 datapoints below threshold. We have 1 datapoint every hour. So, we alarm if below threshold for 6hrs
                "datapoints": 6
            },
            {
                "id": 'WARNING-EfsBurstCredits',
                "name": f"WARNING Burst Credits - {filesystem.file_system_id}",
                "threshold": int(1.25 * 2**40),
                "message":
                f"WARNING. 1.25 TiB Threshold Breached: EFS {filesystem.file_system_id} is depleting burst credits. Add data to the EFS to increase baseline throughput.",
                # Alarm after 6 datapoints below threshold. We have 1 datapoint every hour. So, we alarm if below threshold for 6hrs
                "datapoints": 6
            },
            {
                "id": 'ALERT-EfsBurstCredits',
                "name": f"ALERT Burst Credits - {filesystem.file_system_id}",
                "threshold": int(0.50 * 2**40),
                "message":
                f"ALERT! 500 GiB Threshold Breached: EFS {filesystem.file_system_id} is running out of burst credits. Add data to the EFS to increase baseline throughput or else the Render Farm may cease operation.",
                # Alarm after 6 datapoints below threshold. We have 1 datapoint every hour. So, we alarm if below threshold for 6hrs
                "datapoints": 6
            },
            {
                "id": 'EMERGENCY-EfsBurstCredits',
                "name":
                f"EMERGENCY Burst Credits - {filesystem.file_system_id}",
                "threshold": int(0.10 * 2**40),
                "message":
                f"EMERGENCY! 100 GiB Threshold Breached: EFS {filesystem.file_system_id} is running out of burst credits. Add data to the EFS to increase baseline throughput or else the Render Farm will cease operation.",
                # Alarm after 2 datapoints below threshold. We have 1 datapoint every hour. So, we alarm if below threshold for 2hrs
                "datapoints": 2
            },
        ]
        for config in thresholds:
            alarm = burst_credits_metric.create_alarm(
                self,
                config['id'],
                alarm_name=config['name'],
                actions_enabled=True,
                alarm_description=config['message'],
                treat_missing_data=TreatMissingData.NOT_BREACHING,
                threshold=config['threshold'],
                comparison_operator=ComparisonOperator.LESS_THAN_THRESHOLD,
                evaluation_periods=config['datapoints'])
            alarm.add_alarm_action(alarm_action)
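
For reference, the EFS BurstCreditBalance metric is reported in bytes, so the threshold values above are byte counts. A small sketch making the unit conversion explicit (values mirror the thresholds list above):

TIB = 2 ** 40  # bytes in one tebibyte
thresholds_bytes = {
    "CAUTION": int(2.00 * TIB),    # 2 TiB
    "WARNING": int(1.25 * TIB),    # 1.25 TiB
    "ALERT": int(0.50 * TIB),      # 0.5 TiB
    "EMERGENCY": int(0.10 * TIB),  # 0.1 TiB
}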