Example #1
 def block_public_access():
     block_public_access = s3.BlockPublicAccess(
         block_public_acls=True,
         block_public_policy=True,
         ignore_public_acls=True,
         restrict_public_buckets=True)
     return block_public_access
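Most of the examples on this page spell out all four BlockPublicAccess flags individually. When the intent is simply "block everything", the predefined constant s3.BlockPublicAccess.BLOCK_ALL expresses the same settings more compactly; a minimal sketch, assuming the CDK v1-style import used throughout these examples:

from aws_cdk import aws_s3 as s3

def block_public_access():
    # Equivalent to setting all four block/ignore/restrict flags to True.
    return s3.BlockPublicAccess.BLOCK_ALL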
Example #2
 def define_bucket(self):
     """
     This defines the S3 bucket where the
     Angular application will be hosted from.
     """
     policy = get_bucket_policy()
     bucket_name = 'worker-heavy-angular-web-application-host'
     bucket = s3.Bucket(
         self,
         'angular_app_bucket',
         access_control=s3.BucketAccessControl.PUBLIC_READ,
         block_public_access=s3.BlockPublicAccess(
             block_public_acls=False,
             block_public_policy=False
         ),
         bucket_name=bucket_name,
         public_read_access=True,
         removal_policy=core.RemovalPolicy.DESTROY,
         versioned=True,
     )
     resource = (
         policy['Statement']['Resource']
         .replace('${bucket_name}', bucket_name)
     )
     bucket.add_to_resource_policy(
         iam.PolicyStatement(
             actions=policy['Statement']['Action'],
             effect=policy['Statement']['Effect'],
             principals=policy['Statement']['Principal'],
             sid=policy['Statement']['Sid'],
             resources=[resource]
         )
     )
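Note that iam.PolicyStatement's keyword arguments expect CDK types (iam.Effect values, iam.IPrincipal objects), while a JSON-style policy template usually carries plain strings. If get_bucket_policy() returns raw JSON values, one option is to build the statement from the JSON directly; a minimal sketch, assuming policy['Statement'] is a single statement dict:

     # Sketch only: substitute the bucket name, then let from_json handle the raw values.
     statement = dict(policy['Statement'])
     statement['Resource'] = statement['Resource'].replace('${bucket_name}', bucket_name)
     bucket.add_to_resource_policy(iam.PolicyStatement.from_json(statement))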
Example #3
 def default_block_public_access(self):
     return s3.BlockPublicAccess(
             ignore_public_acls=True,
             block_public_acls=True,
             block_public_policy=True,
             restrict_public_buckets=True
     )
Example #4
    def __init__(
        self, scope: core.Construct, id, bucket_name
    ):  #public_read_access, website_index_document, website_error_document):

        super().__init__(scope=scope,
                         id=id,
                         bucket_name=bucket_name,
                         removal_policy=core.RemovalPolicy.DESTROY,
                         block_public_access=s3.BlockPublicAccess(
                             restrict_public_buckets=True))
Example #5
 def default_block_public_access():
     """
     Block public access by default
     """
     block_public_access = s3.BlockPublicAccess(
         block_public_acls=True,
         block_public_policy=True,
         ignore_public_acls=True,
         restrict_public_buckets=True)
     return block_public_access
Example #6
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        publicAccess = s3.BlockPublicAccess(block_public_acls=True,
                                            block_public_policy=True,
                                            ignore_public_acls=True,
                                            restrict_public_buckets=True)
        bucket = s3.Bucket(self,
                           "cdk-bucket",
                           versioned=True,
                           block_public_access=publicAccess)
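For context, a stack like the one in Example #6 is normally instantiated from an app entry point and synthesized to CloudFormation; a minimal, self-contained sketch (CDK v1-style imports assumed, the stack and app names are hypothetical):

from aws_cdk import core
from aws_cdk import aws_s3 as s3

class CdkBucketStack(core.Stack):  # hypothetical name for the stack above
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        s3.Bucket(self, "cdk-bucket",
                  versioned=True,
                  block_public_access=s3.BlockPublicAccess.BLOCK_ALL)

app = core.App()
CdkBucketStack(app, "cdk-bucket-stack")
app.synth()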
Example #7
    def __init__(self, app: core.App, id: str) -> None:
        super().__init__(app, id)

        EVENTBRITE_TOKEN = os.environ.get('EVENTBRITE_TOKEN')
        NPS_KEY = os.environ.get('NPS_KEY')
        
        # create s3 bucket to put results
        bucket = s3.Bucket(
            self, 'results-bucket',
            versioned=False,
            removal_policy=core.RemovalPolicy.DESTROY,
            block_public_access=s3.BlockPublicAccess(
                block_public_acls=False,
                ignore_public_acls=False,
                block_public_policy=True,
                restrict_public_buckets=True
            )
        )

        # create lambda to scrape events
        lambda_scrapers = lambda_.Function(
            self, "scrapers",
            code=lambda_.Code.from_asset('lambda-releases/scrapers.zip'),
            handler="get_events.main",
            timeout=core.Duration.seconds(600),
            runtime=lambda_.Runtime.PYTHON_3_6,
            memory_size=1000
        )
        
        # set env vars
        lambda_scrapers.add_environment('NPS_KEY', NPS_KEY)
        lambda_scrapers.add_environment('EVENTBRITE_TOKEN', EVENTBRITE_TOKEN)
        lambda_scrapers.add_environment('BUCKET_NAME', bucket.bucket_name)
        
        # trigger every 1st and 15th of the month at 18:00 UTC (1pm EST)
        rule = events.Rule(
            self, "Rule",
            schedule=events.Schedule.cron(
                minute='0',
                hour='18',
                day="1,15",
                month='*',
                year='*'),
        )
        rule.add_target(targets.LambdaFunction(lambda_scrapers))

        # grant permissions to lambda to use bucket
        bucket.grant_read_write(lambda_scrapers)
Example #8
def add_website_bucket(self):
    website_bucket = _s3.Bucket(self,
                                "WebsiteBucket",
                                website_index_document="index.html",
                                block_public_access=_s3.BlockPublicAccess(
                                    restrict_public_buckets=False),
                                removal_policy=core.RemovalPolicy.DESTROY)

    bucket_policy = _iam.PolicyStatement(
        actions=['s3:GetObject'],
        resources=[f'{website_bucket.bucket_arn}/*'],
        principals=[_iam.Anyone()])

    website_bucket.add_to_resource_policy(bucket_policy)

    return website_bucket
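A small point about the website-bucket helper above: add_to_resource_policy does not raise when a statement cannot be attached (for example on imported buckets); it returns a result object whose statement_added flag can be checked. A minimal sketch, assuming the same _s3/_iam aliases; _iam.AnyPrincipal() can be substituted if _iam.Anyone() is flagged as deprecated in your CDK version:

    # Sketch only: verify the public-read statement was actually attached.
    result = website_bucket.add_to_resource_policy(
        _iam.PolicyStatement(
            actions=['s3:GetObject'],
            resources=[f'{website_bucket.bucket_arn}/*'],
            principals=[_iam.AnyPrincipal()]))
    if not result.statement_added:
        raise RuntimeError('bucket policy statement was not attached')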
Example #9
    def __init__(
        self,
        scope: core.Construct,
        construct_id: str,
        elastic_domain: aes.Domain,
        **kwargs,
    ) -> None:
        super().__init__(scope, construct_id, **kwargs)
        indexing_lambda = _lambda.Function(
            self,
            "IndexingHandler",
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.asset(LAMBDA_PATH),
            handler="lambda_function.lambda_handler",
            environment={
                "EMBEDDER_IP": config.get_embedder_ip(),
                "ES_URL": elastic_domain.domain_endpoint,
                "ES_USER": config.get_es_credentials()[0],
                "ES_PASSWORD": config.get_es_credentials()[1],
                "INDEX_NAME": config.get_es_index(),
            },
        )
        notification = s3n.LambdaDestination(indexing_lambda)

        block_public_access = s3.BlockPublicAccess(
            block_public_acls=True,
            block_public_policy=True,
            ignore_public_acls=True,
            restrict_public_buckets=True)
        bucket = s3.Bucket(self,
                           "DocsDestination",
                           block_public_access=block_public_access,
                           removal_policy=core.RemovalPolicy.DESTROY)
        bucket.grant_read(indexing_lambda)
        bucket.add_event_notification(
            s3.EventType.OBJECT_CREATED,
            notification,
            s3.NotificationKeyFilter(prefix="wikipages/"),
        )

        core.Tags.of(indexing_lambda).add("system-id", config.get_system_id())
        core.Tags.of(bucket).add("system-id", config.get_system_id())

        core.CfnOutput(self, "S3BucketName", value=bucket.bucket_name)
        core.CfnOutput(self,
                       "IndexingLambdaName",
                       value=indexing_lambda.function_name)
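The indexing Lambda in Example #9 is invoked with standard S3 "object created" notifications; its handler code is not part of the example. A hypothetical sketch of how such a handler might extract the bucket and key from the event (the Elasticsearch indexing itself is omitted):

# lambda_function.py -- hypothetical handler; the real indexing code is not shown here.
import boto3

s3_client = boto3.client("s3")

def lambda_handler(event, context):
    # Each record of an S3 notification carries the bucket name and object key.
    for record in event.get("Records", []):
        bucket = record["s3"]["bucket"]["name"]
        key = record["s3"]["object"]["key"]
        body = s3_client.get_object(Bucket=bucket, Key=key)["Body"].read()
        # ... index `body` into the Elasticsearch domain here ...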
Example #10
 def create_bucket(self, _access_logs_bucket: Bucket, stage):
     return Bucket(self,
                   "S3bucket",
                   bucket_name="staticsite202104" + stage,
                   encryption=BucketEncryption.S3_MANAGED,
                   removal_policy=RemovalPolicy.DESTROY,
                   auto_delete_objects=True,
                   versioned=True,
                   website_index_document="index.html",
                   website_error_document="index.html",
                   server_access_logs_bucket=_access_logs_bucket,
                   server_access_logs_prefix="gatsbystaticsite",
                   block_public_access=_s3.BlockPublicAccess(
                       block_public_policy=True,
                       block_public_acls=True,
                       ignore_public_acls=True,
                       restrict_public_buckets=True))
Example #11
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        prj_name = self.node.try_get_context("project_name")
        env_name = self.node.try_get_context("env")

        media_distribution_oai = cloudfront.OriginAccessIdentity(
            self, 'media-distribution-oai')
        media_distribution_oai.apply_removal_policy(core.RemovalPolicy.DESTROY)

        frontend_bucket = s3.Bucket(
            self,
            'frontend-bucket',
            access_control=s3.BucketAccessControl.BUCKET_OWNER_FULL_CONTROL,
            encryption=s3.BucketEncryption.S3_MANAGED,
            bucket_name=prj_name + env_name + '-bucket',
            website_index_document='index.html',
            website_error_document='index.html',
            block_public_access=s3.BlockPublicAccess(
                block_public_acls=True,
                block_public_policy=True,
                ignore_public_acls=True,
                restrict_public_buckets=True),
            removal_policy=core.RemovalPolicy.DESTROY)

        media_assets = s3_deployment.BucketDeployment(
            self,
            'media-assets',
            sources=[s3_deployment.Source.asset('./assets')],
            destination_bucket=frontend_bucket)

        media_distribution = cloudfront.CloudFrontWebDistribution(
            self,
            'media-distribution',
            origin_configs=[
                cloudfront.SourceConfiguration(
                    behaviors=[cloudfront.Behavior(is_default_behavior=True)],
                    s3_origin_source=cloudfront.S3OriginConfig(
                        s3_bucket_source=frontend_bucket,
                        origin_access_identity=cloudfront.OriginAccessIdentity(
                            self, 'frontend-origin')))
            ],
            #Edege server location https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_cloudfront/PriceClass.html#aws_cdk.aws_cloudfront.PriceClass
            price_class=cloudfront.PriceClass.PRICE_CLASS_ALL)
        media_distribution.apply_removal_policy(core.RemovalPolicy.DESTROY)
Example #12
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        rand_int = secrets.randbelow(1000001)

        self._instance = s3.Bucket(
            self,
            "dataops-analytics-bucket",
            bucket_name=os.environ.get("BUCKET_NAME",
                                       f"dataops-analytics-{rand_int}"),
            block_public_access=s3.BlockPublicAccess(
                block_public_acls=True,
                block_public_policy=True,
                ignore_public_acls=True,
                restrict_public_buckets=True,
            ),
            removal_policy=core.RemovalPolicy.DESTROY,
            versioned=False,
        )
Example #13
def create_source_bucket(self):
    # S3
    _s3.Bucket(self, 'SourceBucket',
               bucket_name = 'demo-codepipeline-source-bucket',
               block_public_access =
               _s3.BlockPublicAccess(
                   block_public_acls=False,
                   block_public_policy=False,
                   ignore_public_acls=False,
                   restrict_public_buckets=False
               ),
               versioned = True,
               removal_policy = RemovalPolicy.DESTROY
               )

    source_bucket=_s3.Bucket.from_bucket_name(
        self, 'SourceBucketName',
        'demo-codepipeline-source-bucket'
    )
    return source_bucket
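In Example #13 the bucket is created and then immediately re-imported with Bucket.from_bucket_name. Since _s3.Bucket(...) already returns a bucket construct, the lookup is redundant; a minimal sketch of the more direct form, under the same assumptions as the original:

def create_source_bucket(self):
    # Returning the construct directly avoids the extra from_bucket_name lookup.
    return _s3.Bucket(self, 'SourceBucket',
                      bucket_name='demo-codepipeline-source-bucket',
                      block_public_access=_s3.BlockPublicAccess(
                          block_public_acls=False,
                          block_public_policy=False,
                          ignore_public_acls=False,
                          restrict_public_buckets=False),
                      versioned=True,
                      removal_policy=RemovalPolicy.DESTROY)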
Example #14
    def _create_emr_logging_bucket(self):
        # bucket names must be globally unique across S3, otherwise creation fails
        bucket_name = "emr-logs-udacity-final-project"
        bucket_id = f"{bucket_name}-bucket"

        bucket = s3.Bucket(
            self,
            id=bucket_id,
            bucket_name=bucket_name,
            encryption=s3.BucketEncryption.S3_MANAGED,
            block_public_access=s3.BlockPublicAccess(
                block_public_acls=True,
                block_public_policy=True,
                ignore_public_acls=True,
                restrict_public_buckets=True,
            ),
        )

        return bucket
Example #15
def create_artifact_bucket(self):
    # S3
    _s3.Bucket(self, 'ArtifactBucket',
       bucket_name = 'demo-codepipeline-artifact-bucket',
       block_public_access =
           _s3.BlockPublicAccess(
               block_public_acls=False,
               block_public_policy=False,
               ignore_public_acls=False,
               restrict_public_buckets=False
           ),
       versioned = False,
       removal_policy = RemovalPolicy.DESTROY
   )

    artifact_bucket=_s3.Bucket.from_bucket_name(
        self, 'ArtifactBucketName',
        'demo-codepipeline-artifact-bucket'
    )
    return artifact_bucket
Example #16
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        prj_name = self.node.try_get_context("project_name")
        env_name = self.node.try_get_context("env")

        account_id = core.Aws.ACCOUNT_ID

        bucket1 = s3.Bucket(
            self,
            'default-bucket',
            bucket_name=account_id + '-' + prj_name + '-' + env_name +
            '-default-bucket',
            access_control=s3.BucketAccessControl.BUCKET_OWNER_FULL_CONTROL,
            encryption=s3.BucketEncryption.S3_MANAGED,
            removal_policy=core.RemovalPolicy.DESTROY,
            block_public_access=s3.BlockPublicAccess(
                block_public_acls=True,
                block_public_policy=True,
                ignore_public_acls=True,
                restrict_public_buckets=True))
Example #17
    def __init__(self, app: core.App, id: str, **kwargs) -> None:
        super().__init__(app, id)

        account_id = core.Aws.ACCOUNT_ID
        prj_name = self.node.try_get_context("project_name")
        env_name = self.node.try_get_context("env")

        # pipeline artifacts bucket
        artifacts_bucket = s3.Bucket(self, 'artifact-bucket',
                                     access_control=s3.BucketAccessControl.BUCKET_OWNER_FULL_CONTROL,
                                     encryption=s3.BucketEncryption.S3_MANAGED,
                                     block_public_access=s3.BlockPublicAccess(
                                         block_public_acls=True,
                                         block_public_policy=True,
                                         ignore_public_acls=True,
                                         restrict_public_buckets=True
                                     ),
                                     removal_policy=core.RemovalPolicy.DESTROY
                                     )
        core.CfnOutput(self, 's3-build-artifacts-export',
                       value=artifacts_bucket.bucket_name,
                       export_name='build-artifacts-bucket')
Example #18
    def _create_data_bucket(self):
        """
        This bucket will be the place where our EMR output data is stored. Also the glue crawler will use this
        for inferring our schemas
        """
        bucket_name = f"capstone-uda-data"
        bucket_id = f"{bucket_name}-bucket"

        bucket = s3.Bucket(
            self,
            id=bucket_id,
            bucket_name=bucket_name,
            encryption=s3.BucketEncryption.S3_MANAGED,
            block_public_access=s3.BlockPublicAccess(
                block_public_acls=True,
                block_public_policy=True,
                ignore_public_acls=True,
                restrict_public_buckets=True,
            ),
        )

        return bucket
Example #19
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        prj_name = self.node.try_get_context("project_name")
        env_name = self.node.try_get_context("env")

        account_id = core.Aws.ACCOUNT_ID
        lambda_bucket = s3.Bucket(
            self,
            'lambda-bucket',
            access_control=s3.BucketAccessControl.BUCKET_OWNER_FULL_CONTROL,
            encryption=s3.BucketEncryption.S3_MANAGED,
            bucket_name=account_id + '-' + env_name +
            '-lambda-deploy-packages',
            block_public_access=s3.BlockPublicAccess(
                block_public_acls=True,
                block_public_policy=True,
                ignore_public_acls=True,
                restrict_public_buckets=True),
            removal_policy=core.RemovalPolicy.RETAIN)

        ssm.StringParameter(self,
                            'ssm-lambda-bucket',
                            parameter_name='/' + env_name +
                            '/lambda-s3-bucket',
                            string_value=lambda_bucket.bucket_name)

        #To Store Build Artifacts

        artifacts_bucket = s3.Bucket(
            self,
            "build-artifacts",
            access_control=s3.BucketAccessControl.BUCKET_OWNER_FULL_CONTROL,
            encryption=s3.BucketEncryption.S3_MANAGED,
            bucket_name=account_id + '-' + env_name + '-build-artifacts',
            block_public_access=s3.BlockPublicAccess(
                block_public_acls=True,
                block_public_policy=True,
                ignore_public_acls=True,
                restrict_public_buckets=True),
            removal_policy=core.RemovalPolicy.DESTROY)

        core.CfnOutput(self,
                       's3-build-artifacts-export',
                       value=artifacts_bucket.bucket_name,
                       export_name='build-artifacts-bucket')

        #To Store Frontend App

        frontend_bucket = s3.Bucket(
            self,
            "frontend",
            access_control=s3.BucketAccessControl.BUCKET_OWNER_FULL_CONTROL,
            encryption=s3.BucketEncryption.S3_MANAGED,
            bucket_name=account_id + '-' + env_name + '-frontend',
            block_public_access=s3.BlockPublicAccess(
                block_public_acls=True,
                block_public_policy=True,
                ignore_public_acls=True,
                restrict_public_buckets=True))

        core.CfnOutput(self,
                       's3-frontend-export',
                       value=frontend_bucket.bucket_name,
                       export_name='frontend-bucket')

        #CloudTrail Bucket

        self.cloudtrail_bucket = s3.Bucket(
            self,
            "cloudtrail",
            access_control=s3.BucketAccessControl.BUCKET_OWNER_FULL_CONTROL,
            encryption=s3.BucketEncryption.S3_MANAGED,
            bucket_name=account_id + '-' + env_name + '-cloudtrail',
            block_public_access=s3.BlockPublicAccess(
                block_public_acls=True,
                block_public_policy=True,
                ignore_public_acls=True,
                restrict_public_buckets=True))
Example #20
    def __init__(self, scope: core.Construct, id: str, **kwargs):
        super().__init__(scope, id, **kwargs)

        prj_name = self.node.try_get_context('project_name')
        env_name = self.node.try_get_context('env')

        # using account_id in bucket name because buckets have to
        # be globally unique.  We dont NEED to use account id
        account_id = core.Aws.ACCOUNT_ID

        self.lambda_bucket = s3.Bucket(
            self,
            id=f'{env_name}-lambda-bucket',
            access_control=s3.BucketAccessControl.BUCKET_OWNER_FULL_CONTROL,
            encryption=s3.BucketEncryption.S3_MANAGED,
            bucket_name=f'{env_name}-{account_id}-lambda-deploy-packages',
            block_public_access=s3.BlockPublicAccess(
                block_public_acls=True,
                block_public_policy=True,
                ignore_public_acls=True,
                restrict_public_buckets=True),
            removal_policy=core.RemovalPolicy.DESTROY  # RETAIN
        )

        core.CfnOutput(self,
                       id='lambda_bucket_id',
                       value=self.lambda_bucket.bucket_name,
                       export_name='lambda-bucket')

        ssm.StringParameter(self,
                            id=f"{env_name}-ssm-lambda-bucket",
                            parameter_name=f'/{env_name}/lambda-s3-bucket',
                            string_value=self.lambda_bucket.bucket_name)

        frontend_bucket = s3.Bucket(
            self,
            "frontend",
            access_control=s3.BucketAccessControl.BUCKET_OWNER_FULL_CONTROL,
            encryption=s3.BucketEncryption.S3_MANAGED,
            bucket_name=f'{env_name}-' + account_id + '-' + env_name +
            '-frontend',
            block_public_access=s3.BlockPublicAccess(
                block_public_acls=True,
                block_public_policy=True,
                ignore_public_acls=True,
                restrict_public_buckets=True),
            removal_policy=core.RemovalPolicy.DESTROY)

        core.CfnOutput(self,
                       's3-frontend-export',
                       value=frontend_bucket.bucket_name,
                       export_name='frontend-bucket')

        #CloudTrail Bucket

        self.cloudtrail_bucket = s3.Bucket(
            self,
            "cloudtrail",
            access_control=s3.BucketAccessControl.BUCKET_OWNER_FULL_CONTROL,
            encryption=s3.BucketEncryption.S3_MANAGED,
            bucket_name=f'{env_name}-' + account_id + '-' + env_name +
            '-cloudtrail',
            block_public_access=s3.BlockPublicAccess(
                block_public_acls=True,
                block_public_policy=True,
                ignore_public_acls=True,
                restrict_public_buckets=True),
            removal_policy=core.RemovalPolicy.DESTROY)
Example #21
    def __init__(self, scope: core.Construct, id: str, config_dict,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        """ Get VPC details """
        Ivpc = ec2.Vpc.from_lookup(self, "VPC", vpc_id=config_dict['vpc_id'])
        """ Get sunet seclection context created """
        subnet_1 = ec2.Subnet.from_subnet_attributes(
            self,
            "subnet_1",
            subnet_id=config_dict['SubnetIds'].split(",")[0],
            availability_zone=config_dict['AvailabilityZones'].split(",")[0])
        subnet_2 = ec2.Subnet.from_subnet_attributes(
            self,
            "subnet_2",
            subnet_id=config_dict['SubnetIds'].split(",")[1],
            availability_zone=config_dict['AvailabilityZones'].split(",")[1])
        """ Create Security Group for Lambda Functions """
        lambda_security_group = "datalake-lambda-sg"

        createLambdaSecurityGroup = ec2.SecurityGroup(
            self,
            "createLambdaSecurityGroup",
            vpc=Ivpc,
            allow_all_outbound=True,
            description="This security group will be used for Lambda Funcs",
            security_group_name=lambda_security_group)
        """ Create the Datalake Bucket """
        createDatalakeBucket = s3.Bucket(
            self,
            "createCompRegBucket",
            bucket_name=config_dict['datalake_bucket_name'],
            block_public_access=s3.BlockPublicAccess(
                block_public_acls=True,
                block_public_policy=True,
                ignore_public_acls=True,
                restrict_public_buckets=True))

        core.CfnOutput(self,
                       "createCompRegBucketName",
                       value=createDatalakeBucket.bucket_name)
        """ Create Comp Reg Lambda Function """
        createCompRegLambda = _lambda.Function(
            self,
            "createCompRegLambda",
            function_name="datalake-comp-reg-trigger",
            description=
            "This lambda function will trigger the compound reg pipeline.",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="trigger_compound_reg_pipeline.lambda_handler",
            code=_lambda.Code.asset('lambdas'),
            timeout=core.Duration.seconds(90),
            vpc=Ivpc,
            vpc_subnets=ec2.SubnetSelection(subnets=[subnet_1, subnet_2]),
            security_group=createLambdaSecurityGroup,
            initial_policy=[
                iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                    actions=["s3:*", "batch:*"],
                                    resources=["*"])
            ])
        """ Add s3 event trigger to above lambda function """
        createCompRegLambda.add_event_source(
            S3EventSource(createDatalakeBucket,
                          events=[s3.EventType.OBJECT_CREATED],
                          filters=[
                              s3.NotificationKeyFilter(
                                  prefix="compound_reg/triggers/",
                                  suffix=".trigger")
                          ]))
Example #22
    def __init__(self, app: core.App, id: str) -> None:
        super().__init__(app, id)

        ##################################
        # Resource Property Config
        ##################################

        # see: https://docs.aws.amazon.com/lambda/latest/dg/
        # with-sqs.html#events-sqs-queueconfig
        domain_scan_timeout = 900
        queue_visibility_timeout = 6 * domain_scan_timeout
        # tldextract needs to cache tld list after request and /tmp is writable in Lamdba
        tld_cache = os.path.join('/tmp', '.tld_set')

        ##################################
        # Domain Gatherer Lambda and Queue
        ##################################

        # create queue
        domain_queue = sqs.Queue(
            self,
            'domain-queue',
            visibility_timeout=core.Duration.seconds(queue_visibility_timeout),
            dead_letter_queue=sqs.DeadLetterQueue(
                max_receive_count=5,
                queue=sqs.Queue(self,
                                'domain-queue-dlq',
                                retention_period=core.Duration.days(5))))

        # create lambda to gather domains
        domain_gatherer_lambda = lambda_.Function(
            self,
            "domain-gatherer",
            code=lambda_.Code.from_asset(
                'lambda-releases/domain-gatherer.zip'),
            handler="handler.main",
            timeout=core.Duration.seconds(600),
            runtime=lambda_.Runtime.PYTHON_3_7,
            memory_size=400)
        # set env vars
        domain_gatherer_lambda.add_environment('SQS_URL',
                                               domain_queue.queue_url)
        domain_gatherer_lambda.add_environment('TLDEXTRACT_CACHE', tld_cache)

        # provide lambda with execution role
        domain_gatherer_lambda_exec_policy = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                'lambda:InvokeFunction', 'sqs:SendMessage',
                'sqs:DeleteMessage', 'sqs:SendMessageBatch',
                'sqs:SetQueueAttributes', "sqs:GetQueueAttributes",
                "sqs:GetQueueUrl", 'sqs:GetQueueAttributes'
            ],
            resources=[domain_queue.queue_arn])
        domain_gatherer_lambda.add_to_role_policy(
            domain_gatherer_lambda_exec_policy)

        # allow lambda to send messages to queue
        domain_queue.grant_send_messages(domain_gatherer_lambda)

        # create rule to run the lambda every Friday
        rule = events.Rule(
            self,
            "Rule",
            schedule=events.Schedule.cron(minute='0',
                                          hour='18',
                                          month='*',
                                          week_day='FRI',
                                          year='*'),
        )
        rule.add_target(targets.LambdaFunction(domain_gatherer_lambda))

        ##################################
        # Domain Scan Lambda and Results Bucket
        ##################################

        # create lambda to scan domains
        domain_scanner_lambda = lambda_.Function(
            self,
            "domain-scanner",
            code=lambda_.Code.from_asset('lambda-releases/domain-scanner.zip'),
            handler="handler.main",
            timeout=core.Duration.seconds(domain_scan_timeout),
            runtime=lambda_.Runtime.PYTHON_3_7,
            memory_size=1000)

        # create sqs event source for domain scan lambda
        domain_scanner_lambda.add_event_source(
            sources.SqsEventSource(domain_queue, batch_size=2))

        # create s3 bucket to put results
        bucket = s3.Bucket(self,
                           'results-bucket',
                           versioned=False,
                           removal_policy=core.RemovalPolicy.DESTROY,
                           block_public_access=s3.BlockPublicAccess(
                               block_public_acls=False,
                               ignore_public_acls=False,
                               block_public_policy=True,
                               restrict_public_buckets=True))

        # grant s3:PUT to the pa11y lambda
        bucket.grant_put(domain_scanner_lambda)

        # set an env var for bucket name
        domain_scanner_lambda.add_environment('BUCKET_NAME',
                                              bucket.bucket_name)

        # create execution role for domain scanner lambda
        domain_scanner_lambda_exec_policy = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                'lambda:InvokeFunction', 'sqs:SendMessage',
                'sqs:DeleteMessage', 'sqs:SendMessageBatch',
                'sqs:SetQueueAttributes', "sqs:GetQueueAttributes",
                "sqs:GetQueueUrl", 'sqs:GetQueueAttributes', "s3:PutObject",
                "s3:PutObjectAcl"
            ],
            resources=[bucket.bucket_arn])
        domain_scanner_lambda.add_to_role_policy(
            domain_scanner_lambda_exec_policy)
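One caveat in the scanner policy above: s3:PutObject and s3:PutObjectAcl act on object ARNs, so listing only bucket.bucket_arn in resources does not actually cover the objects (the earlier bucket.grant_put call already grants the correct permission). If the explicit statement is kept, a minimal sketch of the resource list it likely needs:

        # Sketch only: object-level actions need the object ARN pattern, not the bucket ARN.
        domain_scanner_put_policy = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=["s3:PutObject", "s3:PutObjectAcl"],
            resources=[bucket.arn_for_objects("*")])
        domain_scanner_lambda.add_to_role_policy(domain_scanner_put_policy)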
Example #23
    def __init__(self, app: core.App, id: str) -> None:
        super().__init__(app, id)

        ##################################
        # Lambda Timeouts (seconds) & Queue Redrive
        ##################################

        lambda_gatherer_timeout = 600
        lambda_joiner_timeout = 350
        # pa11y's timeout is set to 50, so the lambda is just a little longer
        lambda_a11y_scan_timeout = 55
        max_receive_count = 2

        ##################################
        # S3 Bucket with Domains
        ##################################

        asset = aws_s3_assets.Asset(
            self, 'domain-list', path=os.path.abspath('./domains/domains.csv'))

        ##################################
        # Domain Gatherer Lambda and Queue
        ##################################

        domain_queue = sqs.Queue(
            self,
            'domain-queue',
            visibility_timeout=core.Duration.seconds(
                (max_receive_count + 1) * lambda_gatherer_timeout),
            dead_letter_queue=sqs.DeadLetterQueue(
                max_receive_count=max_receive_count,
                queue=sqs.Queue(self,
                                'domain-queue-dlq',
                                retention_period=core.Duration.days(5))))

        lambda_gatherer = lambda_.Function(
            self,
            "domain-gatherer",
            code=lambda_.Code.from_asset('./lambdas/domain_gatherer'),
            handler="handler.main",
            timeout=core.Duration.seconds(lambda_gatherer_timeout),
            runtime=lambda_.Runtime.PYTHON_3_7,
            memory_size=150)

        lambda_gatherer.add_environment('SQS_URL', domain_queue.queue_url)
        lambda_gatherer.add_environment('BUCKET_NAME', asset.s3_bucket_name)
        lambda_gatherer.add_environment('OBJECT_KEY', asset.s3_object_key)

        lambda_gatherer_sqs_exec_policy = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                'lambda:InvokeFunction', 'sqs:SendMessage',
                'sqs:DeleteMessage', 'sqs:SendMessageBatch',
                'sqs:SetQueueAttributes', 'sqs:GetQueueAttributes',
                'sqs:GetQueueUrl', 'sqs:GetQueueAttributes'
            ],
            resources=[domain_queue.queue_arn])
        lambda_gatherer.add_to_role_policy(lambda_gatherer_sqs_exec_policy)
        domain_queue.grant_send_messages(lambda_gatherer)

        # trigger for 1st and 15th of the month at 18:00 UTC (1pm EST)
        lambda_gatherer_rule = events.Rule(self,
                                           "Lambda Gatherer Rule",
                                           schedule=events.Schedule.cron(
                                               minute='0',
                                               hour='18',
                                               day="1,15",
                                               month='*',
                                               year='*'))
        lambda_gatherer_rule.add_target(
            targets.LambdaFunction(lambda_gatherer))
        asset.grant_read(lambda_gatherer)

        ##################################
        # A11y Scanner Lambda and S3
        ##################################

        layer = lambda_.LayerVersion(
            self,
            'chrome-aws-lambda',
            code=lambda_.Code.from_asset('./lambdas/chrome_aws_lambda.zip'),
            compatible_runtimes=[lambda_.Runtime.NODEJS_12_X],
            description='A layer of chrome-aws-lambda')

        lambda_a11y_scan = lambda_.Function(
            self,
            "a11y-scan",
            code=lambda_.Code.from_asset('./lambdas/a11y_scan'),
            handler="index.handler",
            timeout=core.Duration.seconds(lambda_a11y_scan_timeout),
            runtime=lambda_.Runtime.NODEJS_12_X,
            memory_size=1000,
            layers=[layer])

        lambda_a11y_scan.add_event_source(
            sources.SqsEventSource(domain_queue, batch_size=1))

        # create s3 bucket to put results
        results_bucket = s3.Bucket(self,
                                   'results-bucket',
                                   versioned=False,
                                   removal_policy=core.RemovalPolicy.DESTROY,
                                   block_public_access=s3.BlockPublicAccess(
                                       block_public_acls=True,
                                       ignore_public_acls=True,
                                       block_public_policy=True,
                                       restrict_public_buckets=True),
                                   lifecycle_rules=[
                                       s3.LifecycleRule(
                                           enabled=True,
                                           expiration=core.Duration.days(10))
                                   ])

        lambda_a11y_scan.add_environment('BUCKET_NAME',
                                         results_bucket.bucket_name)
        results_bucket.grant_put(lambda_a11y_scan)

        ##################################
        # Results Joiner Lambda
        ##################################

        # create s3 bucket to put site data
        data_bucket = s3.Bucket(self,
                                'data-bucket',
                                versioned=False,
                                removal_policy=core.RemovalPolicy.DESTROY,
                                block_public_access=s3.BlockPublicAccess(
                                    block_public_acls=True,
                                    ignore_public_acls=True,
                                    block_public_policy=True,
                                    restrict_public_buckets=True))

        lambda_joiner = lambda_.Function(
            self,
            "results-joiner",
            code=lambda_.Code.from_asset(
                './lambda-releases/results_joiner.zip'),
            handler="handler.main",
            timeout=core.Duration.seconds(lambda_joiner_timeout),
            runtime=lambda_.Runtime.PYTHON_3_7,
            memory_size=400)
        lambda_joiner.add_environment('DATA_BUCKET_NAME',
                                      data_bucket.bucket_name)
        lambda_joiner.add_environment('RESULTS_BUCKET_NAME',
                                      results_bucket.bucket_name)
        results_bucket.grant_read_write(lambda_joiner)
        data_bucket.grant_read_write(lambda_joiner)

        # trigger for 8th and 23rd of the month at 18:00 UTC (1pm EST)
        lambda_joiner_rule = events.Rule(self,
                                         "Lambda Joiner Rule",
                                         schedule=events.Schedule.cron(
                                             minute='0',
                                             hour='18',
                                             day="8,23",
                                             month='*',
                                             year='*'))
        lambda_joiner_rule.add_target(targets.LambdaFunction(lambda_joiner))
Example #24
    def __init__(self, scope: Construct, construct_id: str,
                 **kwargs: str) -> None:
        super().__init__(scope, construct_id, **kwargs)

        self.vpc = ec2.Vpc(
            self,
            "aws-data-wrangler-vpc",
            cidr="11.19.224.0/19",
            enable_dns_hostnames=True,
            enable_dns_support=True,
        )
        Tags.of(self.vpc).add("Name", "aws-data-wrangler")
        self.key = kms.Key(
            self,
            id="aws-data-wrangler-key",
            description="Aws Data Wrangler Test Key.",
            policy=iam.PolicyDocument(statements=[
                iam.PolicyStatement(
                    sid="Enable IAM User Permissions",
                    effect=iam.Effect.ALLOW,
                    actions=["kms:*"],
                    principals=[iam.AccountRootPrincipal()],
                    resources=["*"],
                )
            ]),
        )
        kms.Alias(
            self,
            "aws-data-wrangler-key-alias",
            alias_name="alias/aws-data-wrangler-key",
            target_key=self.key,
        )
        self.bucket = s3.Bucket(
            self,
            id="aws-data-wrangler",
            block_public_access=s3.BlockPublicAccess(
                block_public_acls=True,
                block_public_policy=True,
                ignore_public_acls=True,
                restrict_public_buckets=True,
            ),
            lifecycle_rules=[
                s3.LifecycleRule(
                    id="CleaningUp",
                    enabled=True,
                    expiration=Duration.days(1),
                    abort_incomplete_multipart_upload_after=Duration.days(1),
                ),
            ],
            versioned=True,
        )
        glue_db = glue.Database(
            self,
            id="aws_data_wrangler_glue_database",
            database_name="aws_data_wrangler",
            location_uri=f"s3://{self.bucket.bucket_name}",
        )
        log_group = logs.LogGroup(
            self,
            id="aws_data_wrangler_log_group",
            retention=logs.RetentionDays.ONE_MONTH,
        )
        log_stream = logs.LogStream(
            self,
            id="aws_data_wrangler_log_stream",
            log_group=log_group,
        )
        CfnOutput(self, "Region", value=self.region)
        CfnOutput(
            self,
            "VPC",
            value=self.vpc.vpc_id,
            export_name="aws-data-wrangler-base-VPC",
        )
        CfnOutput(
            self,
            "PublicSubnet1",
            value=self.vpc.public_subnets[0].subnet_id,
            export_name="aws-data-wrangler-base-PublicSubnet1",
        )
        CfnOutput(
            self,
            "PublicSubnet2",
            value=self.vpc.public_subnets[1].subnet_id,
            export_name="aws-data-wrangler-base-PublicSubnet2",
        )
        CfnOutput(
            self,
            "PrivateSubnet",
            value=self.vpc.private_subnets[0].subnet_id,
            export_name="aws-data-wrangler-base-PrivateSubnet",
        )
        CfnOutput(
            self,
            "KmsKeyArn",
            value=self.key.key_arn,
            export_name="aws-data-wrangler-base-KmsKeyArn",
        )
        CfnOutput(
            self,
            "BucketName",
            value=self.bucket.bucket_name,
            export_name="aws-data-wrangler-base-BucketName",
        )
        CfnOutput(self, "GlueDatabaseName", value=glue_db.database_name)
        CfnOutput(self, "LogGroupName", value=log_group.log_group_name)
        CfnOutput(self, "LogStream", value=log_stream.log_stream_name)
	def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
		super().__init__(scope, id, **kwargs)

	# CloudFormation Parameters

		glue_db_name = core.CfnParameter(self, "GlueDatabaseNameNycTlc", 
				type="String",
				description="Name of Glue Database to be created for NYC TLC.",
				allowed_pattern="[\w-]+",
				default = "nyc_tlc_db"
			)

		glue_table_name = core.CfnParameter(self, "GlueTableNameNycTlc", 
				type="String",
				description="Name of Glue Table to be created for NYC TLC.",
				allowed_pattern="[\w-]+",
				default = "nyc_tlc_table"
			)

		self.template_options.description = "\
This template deploys the dataset containing New York City Taxi and Limousine Commission (TLC) Trip Record Data.\n \
Sample data is copied from the public dataset into a local S3 bucket, a database and table are created in AWS Glue, \
and the S3 location is registered with AWS Lake Formation."

		self.template_options.metadata = {
			"AWS::CloudFormation::Interface": {
				"License": "MIT-0"
			}
		}
	# Create S3 bucket for storing a copy of the Dataset locally in the AWS Account

		local_dataset_bucket = s3.Bucket(self, "LocalNycTlcBucket",
			block_public_access = s3.BlockPublicAccess(
				block_public_acls=True, 
				block_public_policy=True, 
				ignore_public_acls=True, 
				restrict_public_buckets=True),
			removal_policy = core.RemovalPolicy.DESTROY)

		public_dataset_bucket = s3.Bucket.from_bucket_arn(self, "PublicDatasetBucket", BUCKET_ARN)

		with open("lambda/s3_copy.py", encoding="utf8") as fp:
			s3_copy_code = fp.read()

		s3_copy_execution_role = iam.Role(self, "S3CopyHandlerServiceRole",
			assumed_by = iam.ServicePrincipal('lambda.amazonaws.com'),
			managed_policies = [
				iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole"),
			],
			inline_policies = { "S3CopyHandlerRoleInlinePolicy" : iam.PolicyDocument( 
				statements = [
					iam.PolicyStatement(
						effect=iam.Effect.ALLOW,
						actions=[
							"s3:Get*"
						],
						resources=[
							public_dataset_bucket.bucket_arn,
							public_dataset_bucket.arn_for_objects("*")
						]),
					iam.PolicyStatement(
						effect=iam.Effect.ALLOW,
						actions=[
							"s3:PutObject",
							"s3:GetObject",
							"s3:DeleteObject"
						],
						resources=[local_dataset_bucket.arn_for_objects("*")]
						)
					]
				) }
			)

		s3_copy_fn = _lambda.Function(self, "S3CopyHandler", 
			runtime = _lambda.Runtime.PYTHON_3_7,
			code = _lambda.InlineCode.from_inline(s3_copy_code),
			handler = "index.handler",
			role =  s3_copy_execution_role,
			timeout = core.Duration.seconds(600)
		)

		s3_copy = core.CustomResource(self, "S3Copy", 
			service_token = s3_copy_fn.function_arn,
			resource_type = "Custom::S3Copy",
			properties = {
				"PublicDatasetBucket": public_dataset_bucket.bucket_name,
				"LocalDatasetBucket" : local_dataset_bucket.bucket_name,
				"PublicDatasetObject": OBJECT,
				"LocalDatasetPrefix": glue_table_name.value_as_string
			} 
		)	

	# Create Database, Table and Partitions for Amazon Reviews

		lakeformation_resource = lf.CfnResource(self, "LakeFormationResource", 
			resource_arn = local_dataset_bucket.bucket_arn, 
			use_service_linked_role = True)

		lakeformation_resource.node.add_dependency(s3_copy)

		cfn_glue_db = glue.CfnDatabase(self, "GlueDatabase", 
			catalog_id = core.Aws.ACCOUNT_ID,
			database_input = glue.CfnDatabase.DatabaseInputProperty(
				name = glue_db_name.value_as_string, 
				location_uri=local_dataset_bucket.s3_url_for_object(),
			)
		)

		nyc_tlc_table = glue.CfnTable(self, "GlueTableNycTlc", 
			catalog_id = cfn_glue_db.catalog_id,
			database_name = glue_db_name.value_as_string,
			table_input = glue.CfnTable.TableInputProperty(
				description = "New York City Taxi and Limousine Commission (TLC) Trip Record Data",
				name = glue_table_name.value_as_string,
				parameters = {
					"skip.header.line.count": "1",
					"compressionType": "none",
					"classification": "csv",
					"delimiter": ",",
					"typeOfData": "file"
				},
				storage_descriptor = glue.CfnTable.StorageDescriptorProperty(
					columns = [
						{"name":"vendorid","type":"bigint"},
						{"name":"lpep_pickup_datetime","type":"string"},
						{"name":"lpep_dropoff_datetime","type":"string"},
						{"name":"store_and_fwd_flag","type":"string"},
						{"name":"ratecodeid","type":"bigint"},
						{"name":"pulocationid","type":"bigint"},
						{"name":"dolocationid","type":"bigint"},
						{"name":"passenger_count","type":"bigint"},
						{"name":"trip_distance","type":"double"},
						{"name":"fare_amount","type":"double"},
						{"name":"extra","type":"double"},
						{"name":"mta_tax","type":"double"},
						{"name":"tip_amount","type":"double"},
						{"name":"tolls_amount","type":"double"},
						{"name":"ehail_fee","type":"string"},
						{"name":"improvement_surcharge","type":"double"},
						{"name":"total_amount","type":"double"},
						{"name":"payment_type","type":"bigint"},
						{"name":"trip_type","type":"bigint"},
						{"name":"congestion_surcharge","type":"double"}],
					location = local_dataset_bucket.s3_url_for_object() + "/" + glue_table_name.value_as_string + "/",
					input_format = "org.apache.hadoop.mapred.TextInputFormat",
					output_format = "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat",
					compressed = False,
					serde_info = glue.CfnTable.SerdeInfoProperty( 
						serialization_library = "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",
						parameters = {
							"field.delim": ","
						}
					)
				),
				table_type = "EXTERNAL_TABLE"
			)
		)

		nyc_tlc_table.node.add_dependency(cfn_glue_db)

		core.CfnOutput(self, "LocalNycTlcBucketOutput", 
			value=local_dataset_bucket.bucket_name, 
			description="S3 Bucket created to store the dataset")

		core.CfnOutput(self, "GlueDatabaseOutput", 
			value=cfn_glue_db.ref, 
			description="Glue DB created to host the dataset table")

		core.CfnOutput(self, "GlueTableNycTlcOutput", 
			value=nyc_tlc_table.ref, 
			description="Glue Table created to host the dataset")
Example #26
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here
        prj_name = self.node.try_get_context("project_name")
        env_name = self.node.try_get_context("env")

        #Lambda packages
        lambda_bucket = s3.Bucket(
            self,
            "lambda-packages",
            access_control=s3.BucketAccessControl.BUCKET_OWNER_FULL_CONTROL,
            encryption=s3.BucketEncryption.S3_MANAGED,
            bucket_name=prj_name + '-' + env_name + '-lambda-deploy-packages',
            block_public_access=s3.BlockPublicAccess(
                block_public_acls=True,
                block_public_policy=True,
                ignore_public_acls=True,
                restrict_public_buckets=True))

        core.CfnOutput(self,
                       's3-lambda-export',
                       value=lambda_bucket.bucket_name,
                       export_name='lambda-bucket')

        #images
        images_bucket = s3.Bucket(
            self,
            "images",
            access_control=s3.BucketAccessControl.BUCKET_OWNER_FULL_CONTROL,
            encryption=s3.BucketEncryption.S3_MANAGED,
            bucket_name=prj_name + '-' + env_name + '-images',
            block_public_access=s3.BlockPublicAccess(
                block_public_acls=True,
                block_public_policy=True,
                ignore_public_acls=True,
                restrict_public_buckets=True))

        core.CfnOutput(self,
                       's3-images-export',
                       value=images_bucket.bucket_name,
                       export_name='images-bucket')

        #AccessLogs
        accesslogs_bucket = s3.Bucket(
            self,
            "accesslogs",
            #access_control=s3.BucketAccessControl.BUCKET_OWNER_FULL_CONTROL,
            encryption=s3.BucketEncryption.S3_MANAGED,
            bucket_name=prj_name + '-' + env_name + '-accesslogs',
            block_public_access=s3.BlockPublicAccess(
                block_public_acls=True,
                block_public_policy=True,
                ignore_public_acls=True,
                restrict_public_buckets=True))

        core.CfnOutput(self,
                       's3-accesslogs-export',
                       value=accesslogs_bucket.bucket_name,
                       export_name='accesslogs-bucket')

        #website hosting
        webhosting_bucket = s3.Bucket(
            self,
            "webhosting-bucket",
            access_control=s3.BucketAccessControl.BUCKET_OWNER_FULL_CONTROL,
            encryption=s3.BucketEncryption.S3_MANAGED,
            bucket_name=prj_name + '-' + env_name + '-website-hosting',
            server_access_logs_bucket=accesslogs_bucket,
            server_access_logs_prefix="logs/",
            block_public_access=s3.BlockPublicAccess(
                block_public_acls=True,
                block_public_policy=True,
                ignore_public_acls=True,
                restrict_public_buckets=True))

        core.CfnOutput(self,
                       's3-website-export',
                       value=webhosting_bucket.bucket_name,
                       export_name='webhosting-bucket')

        #Frontend
        frontend_bucket = s3.Bucket(
            self,
            "frontend",
            access_control=s3.BucketAccessControl.BUCKET_OWNER_FULL_CONTROL,
            encryption=s3.BucketEncryption.S3_MANAGED,
            bucket_name=prj_name + '-' + env_name + '-frontend',
            block_public_access=s3.BlockPublicAccess(
                block_public_acls=True,
                block_public_policy=True,
                ignore_public_acls=True,
                restrict_public_buckets=True))

        core.CfnOutput(self,
                       's3-frontend-export',
                       value=frontend_bucket.bucket_name,
                       export_name='frontend-bucket')

        #Admin
        admin_bucket = s3.Bucket(
            self,
            "admin",
            access_control=s3.BucketAccessControl.BUCKET_OWNER_FULL_CONTROL,
            encryption=s3.BucketEncryption.S3_MANAGED,
            bucket_name=prj_name + '-' + env_name + '-admin',
            block_public_access=s3.BlockPublicAccess(
                block_public_acls=True,
                block_public_policy=True,
                ignore_public_acls=True,
                restrict_public_buckets=True))

        core.CfnOutput(self,
                       's3-admin-export',
                       value=admin_bucket.bucket_name,
                       export_name='admin-bucket')

        #Build Logs
        build_logs_bucket = s3.Bucket(
            self,
            "build-logs",
            access_control=s3.BucketAccessControl.BUCKET_OWNER_FULL_CONTROL,
            encryption=s3.BucketEncryption.S3_MANAGED,
            bucket_name=prj_name + '-' + env_name + '-build-logs',
            block_public_access=s3.BlockPublicAccess(
                block_public_acls=True,
                block_public_policy=True,
                ignore_public_acls=True,
                restrict_public_buckets=True),
            removal_policy=core.RemovalPolicy.DESTROY)

        core.CfnOutput(self,
                       's3-build-logs-export',
                       value=build_logs_bucket.bucket_name,
                       export_name='build-logs-bucket')

        #FrontEnd Artifacts
        frontend_artifacts_bucket = s3.Bucket(
            self,
            "frontend-artifacts",
            access_control=s3.BucketAccessControl.BUCKET_OWNER_FULL_CONTROL,
            encryption=s3.BucketEncryption.S3_MANAGED,
            bucket_name=prj_name + '-' + env_name + '-frontend-artifacts',
            block_public_access=s3.BlockPublicAccess(
                block_public_acls=True,
                block_public_policy=True,
                ignore_public_acls=True,
                restrict_public_buckets=True),
            removal_policy=core.RemovalPolicy.DESTROY)

        core.CfnOutput(self,
                       's3-frontend-artifacts-export',
                       value=frontend_artifacts_bucket.bucket_name,
                       export_name='frontend-artifacts-bucket')

        #Admin Artifacts
        admin_artifacts_bucket = s3.Bucket(
            self,
            "admin-artifacts",
            access_control=s3.BucketAccessControl.BUCKET_OWNER_FULL_CONTROL,
            encryption=s3.BucketEncryption.S3_MANAGED,
            bucket_name=prj_name + '-' + env_name + '-admin-artifacts',
            block_public_access=s3.BlockPublicAccess(
                block_public_acls=True,
                block_public_policy=True,
                ignore_public_acls=True,
                restrict_public_buckets=True),
            removal_policy=core.RemovalPolicy.DESTROY)

        core.CfnOutput(self,
                       's3-admin-artifacts-export',
                       value=admin_artifacts_bucket.bucket_name,
                       export_name='admin-artifacts-bucket')

        #CloudTrail Logs
        cloudtrail_bucket = s3.Bucket(
            self,
            "cloudtrail-logs",
            access_control=s3.BucketAccessControl.BUCKET_OWNER_FULL_CONTROL,
            encryption=s3.BucketEncryption.S3_MANAGED,
            bucket_name=prj_name + '-' + env_name + '-cloudtrail-logs',
            block_public_access=s3.BlockPublicAccess(
                block_public_acls=True,
                block_public_policy=True,
                ignore_public_acls=True,
                restrict_public_buckets=True))

        core.CfnOutput(self,
                       's3-cloudtrail-export',
                       value=cloudtrail_bucket.bucket_name,
                       export_name='cloudtrail-bucket')
Example #27
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # TODO: Criar  IAM Roles
        # sm_role_principal = _iam.IPrincipal('sagemaker.amazonaws.com')
        # sm_managed_policy = _iam.IManagedPolicy('AmazonSageMakerFullAccess')
        # sm_iam_role = _iam.Role(self, id='sagemaker_role', assumed_by=sm_role_principal)

        # TODO: criar security groups do publico pra privada e da privada pra db

        vpc_main = _ec2.Vpc(
            self,
            'vpc-main-prd',
            cidr='10.0.0.0/16',
            max_azs=2,
            subnet_configuration=[
                _ec2.SubnetConfiguration(name='Ingress',
                                         subnet_type=_ec2.SubnetType.PUBLIC,
                                         cidr_mask=24),
                _ec2.SubnetConfiguration(name='in-app',
                                         subnet_type=_ec2.SubnetType.PRIVATE,
                                         cidr_mask=24),
                _ec2.SubnetConfiguration(name='in-db',
                                         subnet_type=_ec2.SubnetType.ISOLATED,
                                         cidr_mask=28)
            ])

        # Security Group Basics
        ipv4_peer = _ec2.Peer.any_ipv4()
        https_port = _ec2.Port(protocol=_ec2.Protocol.TCP,
                               from_port=443,
                               to_port=443,
                               string_representation='HTTPS-PORT')

        # Security Groups
        sg_lambda_function1 = _ec2.SecurityGroup(
            self,
            id='lambda-function1',
            vpc=vpc_main,
            security_group_name='lambda-function1',
            description='SecurityGroup for LambdaFunction1',
            allow_all_outbound=True)

        sg_lambda_function1.add_ingress_rule(peer=ipv4_peer,
                                             connection=https_port)

        # Tags
        core.Tag.add(sg_lambda_function1,
                     key='Name',
                     value='lambda-function1-SG')

        # TODO: Requirements, in order of priority
        # 1- S3 buckets:
        # sagemaker-dumps
        # datascience
        # 2- SageMaker Notebook Instance (connected to the dedicated S3 bucket)
        # 3- Lambda Function
        # 4- API Gateway
        # 5- Infrastructure for Airflow

        # TODO: lambda_s3_bucket
        lambda_code_bucket = _s3.Bucket(
            self,
            'lambdacode',
            bucket_name='lambda-code-data-2019',
            encryption=_s3.BucketEncryption.KMS_MANAGED,
            block_public_access=_s3.BlockPublicAccess(
                restrict_public_buckets=True))

        # TODO: lambda
        lambda_function_with_code = _lambda.Function(
            self,
            id='lambda_function1',
            code=_lambda.Code.asset('lambda'),
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler='lambda-handler.handler',
            vpc=vpc_main,
            vpc_subnets=_ec2.SubnetSelection(
                subnet_type=_ec2.SubnetType.PRIVATE),
            security_group=sg_lambda_function1)
        # TODO: API Gateway
        api_gtw_lambda = _apigw.LambdaRestApi(
            self, 'function1Api', handler=lambda_function_with_code)
Exemplo n.º 28
0
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        cidr_block: str,
        platform_identifier: str = 'covariate-ingest',
        **kwargs
    ) -> None:
        super().__init__(scope, id, **kwargs)

        self.lambda_function_role_name = f'{platform_identifier}-lambda-function'
        self.node.set_context('lambda_function_role_name', self.lambda_function_role_name)

        self.batch_job_role_name = f'{platform_identifier}-batch-job'
        self.node.set_context('batch_job_role_name', self.batch_job_role_name)

        self.vpc = ec2.Vpc(
            self,
            "vpc",
            enable_dns_hostnames=True,
            enable_dns_support=True,
            flow_logs={
                "default":
                    ec2.FlowLogOptions(
                        destination=ec2.FlowLogDestination.to_cloud_watch_logs()
                    )
            },
            # max_azs=99,  # Means use all AZs
            max_azs=3,
            cidr=cidr_block,
            # configuration will create a subnet for each config, in each AZ.
            # So in us-east-1: 3 public and 3 private subnets
            subnet_configuration=[
                ec2.SubnetConfiguration(
                    name="Public",
                    cidr_mask=24,
                    subnet_type=ec2.SubnetType.PUBLIC,
                ),
                ec2.SubnetConfiguration(
                    subnet_type=ec2.SubnetType.PRIVATE,
                    name="Private",
                    cidr_mask=20
                )
            ],
            gateway_endpoints={
                "S3":
                    ec2.GatewayVpcEndpointOptions(
                        service=ec2.GatewayVpcEndpointAwsService.S3
                    )
            },
        )
        self.vpc.add_interface_endpoint(
            "EcrDockerEndpoint",
            service=ec2.InterfaceVpcEndpointAwsService.ECR_DOCKER
        )

        # Public NACL
        self.nacl_public = ec2.NetworkAcl(
            self,
            "nacl_public",
            vpc=self.vpc,
            subnet_selection=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PUBLIC
            )
        )
        self.nacl_public.add_entry(
            "in-rule",
            rule_number=95,
            cidr=ec2.AclCidr.any_ipv4(),
            rule_action=ec2.Action.ALLOW,
            direction=ec2.TrafficDirection.INGRESS,
            traffic=ec2.AclTraffic.tcp_port_range(start_port=0, end_port=65535)
        )
        self.nacl_public.add_entry(
            "out-rule",
            rule_number=95,
            cidr=ec2.AclCidr.any_ipv4(),
            rule_action=ec2.Action.ALLOW,
            direction=ec2.TrafficDirection.EGRESS,
            traffic=ec2.AclTraffic.tcp_port_range(start_port=0, end_port=65535)
        )

        # Private NACL
        self.nacl_private = ec2.NetworkAcl(
            self,
            "nacl_private",
            vpc=self.vpc,
            subnet_selection=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PRIVATE
            )
        )
        self.nacl_private.add_entry(
            "in-rule",
            rule_number=95,
            cidr=ec2.AclCidr.any_ipv4(),
            rule_action=ec2.Action.ALLOW,
            direction=ec2.TrafficDirection.INGRESS,
            traffic=ec2.AclTraffic.tcp_port_range(start_port=0, end_port=65432)
        )
        self.nacl_private.add_entry(
            "out-rule",
            rule_number=95,
            cidr=ec2.AclCidr.any_ipv4(),
            rule_action=ec2.Action.ALLOW,
            direction=ec2.TrafficDirection.EGRESS,
            traffic=ec2.AclTraffic.tcp_port_range(start_port=0, end_port=65432)
        )

        # Add Batch Compute Envs
        cpu_instances = [
            ec2.InstanceType('c5.large'),
            ec2.InstanceType('c5.xlarge'),
            ec2.InstanceType('c5.2xlarge'),
            ec2.InstanceType('c5.4xlarge'),
            ec2.InstanceType('m5.large'),
            ec2.InstanceType('m5.xlarge'),
            ec2.InstanceType('m5.2xlarge'),
            ec2.InstanceType('m5.4xlarge'),
        ]

        self.cpu_on_demand = batch.ComputeEnvironment(
            self,
            'batch-cpu-on-demand',
            managed=True,
            enabled=True,
            compute_resources=batch.ComputeResources(
                vpc=self.vpc,  # Will select only private subnets.
                type=batch.ComputeResourceType.ON_DEMAND,
                allocation_strategy=batch.AllocationStrategy.
                BEST_FIT_PROGRESSIVE,
                minv_cpus=0,
                maxv_cpus=640,
                desiredv_cpus=0,
                instance_types=cpu_instances,
                image=ecs.EcsOptimizedImage.amazon_linux2(
                    hardware_type=ecs.AmiHardwareType.STANDARD
                ),
            ),
        )

        self.cpu_spot = batch.ComputeEnvironment(
            self,
            'batch-cpu-spot',
            managed=True,
            enabled=True,
            compute_resources=batch.ComputeResources(
                vpc=self.vpc,  # Will select only private subnets.
                type=batch.ComputeResourceType.SPOT,
                allocation_strategy=batch.AllocationStrategy.
                SPOT_CAPACITY_OPTIMIZED,
                bid_percentage=80,
                minv_cpus=0,
                maxv_cpus=640,
                desiredv_cpus=0,
                instance_types=cpu_instances,
                image=ecs.EcsOptimizedImage.amazon_linux2(
                    hardware_type=ecs.AmiHardwareType.STANDARD
                ),
            ),
        )

        self.cpu_spot_first = batch.JobQueue(
            self,
            'cpu-spot-first',
            job_queue_name=f'{platform_identifier}-cpu-queue',
            compute_environments=[
                batch.JobQueueComputeEnvironment(
                    compute_environment=self.cpu_spot, order=1
                ),
                batch.JobQueueComputeEnvironment(
                    compute_environment=self.cpu_on_demand, order=2
                ),
            ],
            enabled=True,
            priority=10
        )

        self.lambda_function_role = iam.Role(
            self,
            'lambda-function-role',
            role_name=self.lambda_function_role_name,
            description='',
            assumed_by=iam.ServicePrincipal(service='lambda.amazonaws.com'),
        )
        

        self.batch_job_role = iam.Role(
            self,
            'batch-job-role',
            role_name=self.batch_job_role_name,
            description='',
            assumed_by=iam.ServicePrincipal(service='ecs-tasks.amazonaws.com'),
        )

        self.intermediate_bucket = s3.Bucket(
            self,
            f'{platform_identifier}-data-bucket',
            bucket_name=f'{platform_identifier}-data-dev',
            block_public_access=s3.BlockPublicAccess(
                block_public_acls=False,
                block_public_policy=False,
                ignore_public_acls=False,
                restrict_public_buckets=False
            ),
        )
        self.intermediate_bucket.grant_read_write(self.lambda_function_role)
        self.intermediate_bucket.grant_read_write(self.batch_job_role)

        cluster = ecs.Cluster(
            self, 
            "covar-api-cluster",
            cluster_name='covar-service-cluster',
            vpc=self.vpc
        )
Exemplo n.º 29
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        if self.node.try_get_context('vpc_type'):
            validate_cdk_json(self)

        ES_LOADER_TIMEOUT = 600
        ######################################################################
        # REGION mapping / ELB & Lambda Arch
        ######################################################################
        elb_id_temp = region_info.FactName.ELBV2_ACCOUNT
        elb_map_temp = region_info.RegionInfo.region_map(elb_id_temp)
        region_dict = {}
        for region in elb_map_temp:
            # ELB account ID
            region_dict[region] = {'ElbV2AccountId': elb_map_temp[region]}
            # Lambda Arch
            if region in ('us-east-1', 'us-east-2', 'us-west-2', 'ap-south-1',
                          'ap-southeast-1', 'ap-southeast-2', 'ap-northeast-1',
                          'eu-central-1', 'eu-west-1', 'eu-west-2'):
                region_dict[region]['LambdaArch'] = (
                    aws_lambda.Architecture.ARM_64.name)
            else:
                region_dict[region]['LambdaArch'] = (
                    aws_lambda.Architecture.X86_64.name)
        region_mapping = core.CfnMapping(
            scope=self, id='RegionMap', mapping=region_dict)

        ######################################################################
        # get params
        ######################################################################
        allow_source_address = core.CfnParameter(
            self, 'AllowedSourceIpAddresses', allowed_pattern=r'^[0-9./\s]*',
            description='Space-delimited list of CIDR blocks',
            default='10.0.0.0/8 172.16.0.0/12 192.168.0.0/16')
        sns_email = core.CfnParameter(
            self, 'SnsEmail', allowed_pattern=r'^[0-9a-zA-Z@_\-\+\.]*',
            description=('Input your email for the SNS topic to which Amazon '
                         'OpenSearch Service will send alerts'),
            default='*****@*****.**')
        geoip_license_key = core.CfnParameter(
            self, 'GeoLite2LicenseKey', allowed_pattern=r'^[0-9a-zA-Z]{16}$',
            default='xxxxxxxxxxxxxxxx',
            description=("If you wolud like to enrich geoip locaiton such as "
                         "IP address's country, get a license key form MaxMind"
                         " and input the key. If you not, keep "
                         "xxxxxxxxxxxxxxxx"))
        reserved_concurrency = core.CfnParameter(
            self, 'ReservedConcurrency', default=10, type='Number',
            description=('Input reserved concurrency. Increase this value if '
                         'there is a steady log delay despite no errors'))
        aes_domain_name = self.node.try_get_context('aes_domain_name')
        bucket = f'{aes_domain_name}-{core.Aws.ACCOUNT_ID}'
        s3bucket_name_geo = f'{bucket}-geo'
        s3bucket_name_log = f'{bucket}-log'
        s3bucket_name_snapshot = f'{bucket}-snapshot'

        # organizations / multiaccount
        org_id = self.node.try_get_context('organizations').get('org_id')
        org_mgmt_id = self.node.try_get_context(
            'organizations').get('management_id')
        org_member_ids = self.node.try_get_context(
            'organizations').get('member_ids')
        no_org_ids = self.node.try_get_context(
            'no_organizations').get('aws_accounts')

        # Overwrite the default S3 bucket names with custom names
        temp_geo = self.node.try_get_context('s3_bucket_name').get('geo')
        if temp_geo:
            s3bucket_name_geo = temp_geo
        else:
            print('Using default bucket names')
        temp_log = self.node.try_get_context('s3_bucket_name').get('log')
        if temp_log:
            s3bucket_name_log = temp_log
        elif org_id or no_org_ids:
            s3bucket_name_log = f'{aes_domain_name}-{self.account}-log'
        else:
            print('Using default bucket names')
        temp_snap = self.node.try_get_context('s3_bucket_name').get('snapshot')
        if temp_snap:
            s3bucket_name_snapshot = temp_snap
        else:
            print('Using default bucket names')
        kms_cmk_alias = self.node.try_get_context('kms_cmk_alias')
        if not kms_cmk_alias:
            kms_cmk_alias = 'aes-siem-key'
            print('Using default key alias')

        ######################################################################
        # deploy VPC when context is defined as using VPC
        ######################################################################
        # vpc_type is 'new' or 'import' or None
        vpc_type = self.node.try_get_context('vpc_type')

        if vpc_type == 'new':
            is_vpc = True
            vpc_cidr = self.node.try_get_context('new_vpc_nw_cidr_block')
            subnet_cidr_mask = int(
                self.node.try_get_context('new_vpc_subnet_cidr_mask'))
            # VPC
            vpc_aes_siem = aws_ec2.Vpc(
                self, 'VpcAesSiem', cidr=vpc_cidr,
                max_azs=3, nat_gateways=0,
                subnet_configuration=[
                    aws_ec2.SubnetConfiguration(
                        subnet_type=aws_ec2.SubnetType.ISOLATED,
                        name='aes-siem-subnet', cidr_mask=subnet_cidr_mask)])
            subnet1 = vpc_aes_siem.isolated_subnets[0]
            subnets = [{'subnet_type': aws_ec2.SubnetType.ISOLATED}]
            vpc_subnets = aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.ISOLATED)
            vpc_aes_siem_opt = vpc_aes_siem.node.default_child.cfn_options
            vpc_aes_siem_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN
            for subnet in vpc_aes_siem.isolated_subnets:
                subnet_opt = subnet.node.default_child.cfn_options
                subnet_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN
        elif vpc_type == 'import':
            vpc_id = self.node.try_get_context('imported_vpc_id')
            vpc_aes_siem = aws_ec2.Vpc.from_lookup(
                self, 'VpcAesSiem', vpc_id=vpc_id)

            subnet_ids = get_subnet_ids(self)
            subnets = []
            for number, subnet_id in enumerate(subnet_ids, 1):
                obj_id = 'Subenet' + str(number)
                subnet = aws_ec2.Subnet.from_subnet_id(self, obj_id, subnet_id)
                subnets.append(subnet)
            subnet1 = subnets[0]
            vpc_subnets = aws_ec2.SubnetSelection(subnets=subnets)

        if vpc_type:
            is_vpc = True
            # Security Group
            sg_vpc_noinbound_aes_siem = aws_ec2.SecurityGroup(
                self, 'AesSiemVpcNoinboundSecurityGroup',
                security_group_name='aes-siem-noinbound-vpc-sg',
                vpc=vpc_aes_siem)

            sg_vpc_aes_siem = aws_ec2.SecurityGroup(
                self, 'AesSiemVpcSecurityGroup',
                security_group_name='aes-siem-vpc-sg',
                vpc=vpc_aes_siem)
            sg_vpc_aes_siem.add_ingress_rule(
                peer=aws_ec2.Peer.ipv4(vpc_aes_siem.vpc_cidr_block),
                connection=aws_ec2.Port.tcp(443),)
            sg_vpc_opt = sg_vpc_aes_siem.node.default_child.cfn_options
            sg_vpc_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

            # VPC Endpoint
            vpc_aes_siem.add_gateway_endpoint(
                'S3Endpoint', service=aws_ec2.GatewayVpcEndpointAwsService.S3,
                subnets=subnets)
            vpc_aes_siem.add_interface_endpoint(
                'SQSEndpoint', security_groups=[sg_vpc_aes_siem],
                service=aws_ec2.InterfaceVpcEndpointAwsService.SQS,)
            vpc_aes_siem.add_interface_endpoint(
                'KMSEndpoint', security_groups=[sg_vpc_aes_siem],
                service=aws_ec2.InterfaceVpcEndpointAwsService.KMS,)
        else:
            is_vpc = False

        is_vpc = core.CfnCondition(
            self, 'IsVpc', expression=core.Fn.condition_equals(is_vpc, True))
        """
        CloudFormation実行時の条件式の書き方
        ClassのBasesが aws_cdk.core.Resource の時は、
        node.default_child.cfn_options.condition = is_vpc
        ClassのBasesが aws_cdk.core.CfnResource の時は、
        cfn_options.condition = is_vpc
        """

        ######################################################################
        # create cmk of KMS to encrypt S3 bucket
        ######################################################################
        kms_aes_siem = aws_kms.Key(
            self, 'KmsAesSiemLog', description='CMK for SIEM solution',
            removal_policy=core.RemovalPolicy.RETAIN)

        aws_kms.Alias(
            self, 'KmsAesSiemLogAlias', alias_name=kms_cmk_alias,
            target_key=kms_aes_siem,
            removal_policy=core.RemovalPolicy.RETAIN)

        kms_aes_siem.add_to_resource_policy(
            aws_iam.PolicyStatement(
                sid='Allow GuardDuty to use the key',
                actions=['kms:GenerateDataKey'],
                principals=[aws_iam.ServicePrincipal(
                    'guardduty.amazonaws.com')],
                resources=['*'],),)

        kms_aes_siem.add_to_resource_policy(
            aws_iam.PolicyStatement(
                sid='Allow VPC Flow Logs to use the key',
                actions=['kms:Encrypt', 'kms:Decrypt', 'kms:ReEncrypt*',
                         'kms:GenerateDataKey*', 'kms:DescribeKey'],
                principals=[aws_iam.ServicePrincipal(
                    'delivery.logs.amazonaws.com')],
                resources=['*'],),)
        # basic policy
        key_policy_basic1 = aws_iam.PolicyStatement(
            sid='Allow principals in the account to decrypt log files',
            actions=['kms:DescribeKey', 'kms:ReEncryptFrom'],
            principals=[aws_iam.AccountPrincipal(
                account_id=core.Aws.ACCOUNT_ID)],
            resources=['*'],)
        kms_aes_siem.add_to_resource_policy(key_policy_basic1)

        # for Athena
        key_policy_athena = aws_iam.PolicyStatement(
            sid='Allow Athena to query s3 objects with this key',
            actions=['kms:Decrypt', 'kms:DescribeKey', 'kms:Encrypt',
                     'kms:GenerateDataKey*', 'kms:ReEncrypt*'],
            principals=[aws_iam.AccountPrincipal(
                account_id=core.Aws.ACCOUNT_ID)],
            resources=['*'],
            conditions={'ForAnyValue:StringEquals': {
                'aws:CalledVia': 'athena.amazonaws.com'}})
        kms_aes_siem.add_to_resource_policy(key_policy_athena)

        # for CloudTrail
        key_policy_trail1 = aws_iam.PolicyStatement(
            sid='Allow CloudTrail to describe key',
            actions=['kms:DescribeKey'],
            principals=[aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')],
            resources=['*'],)
        kms_aes_siem.add_to_resource_policy(key_policy_trail1)

        key_policy_trail2 = aws_iam.PolicyStatement(
            sid=('Allow CloudTrail to encrypt logs'),
            actions=['kms:GenerateDataKey*'],
            principals=[aws_iam.ServicePrincipal(
                'cloudtrail.amazonaws.com')],
            resources=['*'],
            conditions={'StringLike': {
                'kms:EncryptionContext:aws:cloudtrail:arn': [
                    f'arn:aws:cloudtrail:*:{core.Aws.ACCOUNT_ID}:trail/*']}})
        kms_aes_siem.add_to_resource_policy(key_policy_trail2)

        ######################################################################
        # create s3 bucket
        ######################################################################
        block_pub = aws_s3.BlockPublicAccess(
            block_public_acls=True,
            ignore_public_acls=True,
            block_public_policy=True,
            restrict_public_buckets=True
        )
        s3_geo = aws_s3.Bucket(
            self, 'S3BucketForGeoip', block_public_access=block_pub,
            bucket_name=s3bucket_name_geo,
            # removal_policy=core.RemovalPolicy.DESTROY,
        )

        # create s3 bucket for log collector
        s3_log = aws_s3.Bucket(
            self, 'S3BucketForLog', block_public_access=block_pub,
            bucket_name=s3bucket_name_log, versioned=True,
            encryption=aws_s3.BucketEncryption.S3_MANAGED,
            # removal_policy=core.RemovalPolicy.DESTROY,
        )

        # create s3 bucket for aes snapshot
        s3_snapshot = aws_s3.Bucket(
            self, 'S3BucketForSnapshot', block_public_access=block_pub,
            bucket_name=s3bucket_name_snapshot,
            # removal_policy=core.RemovalPolicy.DESTROY,
        )

        ######################################################################
        # IAM Role
        ######################################################################
        # deployment policy for lambda deploy-aes
        arn_prefix = f'arn:aws:logs:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}'
        loggroup_aes = f'log-group:/aws/aes/domains/{aes_domain_name}/*'
        loggroup_opensearch = (
            f'log-group:/aws/OpenSearchService/domains/{aes_domain_name}/*')
        loggroup_lambda = 'log-group:/aws/lambda/aes-siem-*'
        policydoc_create_loggroup = aws_iam.PolicyDocument(
            statements=[
                aws_iam.PolicyStatement(
                    actions=[
                        'logs:PutResourcePolicy',
                        'logs:DescribeLogGroups',
                        'logs:DescribeLogStreams'
                    ],
                    resources=[f'{arn_prefix}:*', ]
                ),
                aws_iam.PolicyStatement(
                    actions=[
                        'logs:CreateLogGroup', 'logs:CreateLogStream',
                        'logs:PutLogEvents', 'logs:PutRetentionPolicy'],
                    resources=[
                        f'{arn_prefix}:{loggroup_aes}',
                        f'{arn_prefix}:{loggroup_opensearch}',
                        f'{arn_prefix}:{loggroup_lambda}',
                    ],
                )
            ]
        )

        policydoc_crhelper = aws_iam.PolicyDocument(
            statements=[
                aws_iam.PolicyStatement(
                    actions=[
                        'lambda:AddPermission',
                        'lambda:RemovePermission',
                        'events:ListRules',
                        'events:PutRule',
                        'events:DeleteRule',
                        'events:PutTargets',
                        'events:RemoveTargets'],
                    resources=['*']
                )
            ]
        )

        # snapshot rule for AES
        policydoc_snapshot = aws_iam.PolicyDocument(
            statements=[
                aws_iam.PolicyStatement(
                    actions=['s3:ListBucket'],
                    resources=[s3_snapshot.bucket_arn]
                ),
                aws_iam.PolicyStatement(
                    actions=['s3:GetObject', 's3:PutObject',
                             's3:DeleteObject'],
                    resources=[s3_snapshot.bucket_arn + '/*']
                )
            ]
        )
        aes_siem_snapshot_role = aws_iam.Role(
            self, 'AesSiemSnapshotRole',
            role_name='aes-siem-snapshot-role',
            inline_policies=[policydoc_snapshot, ],
            assumed_by=aws_iam.ServicePrincipal('es.amazonaws.com')
        )

        policydoc_assume_snapshrole = aws_iam.PolicyDocument(
            statements=[
                aws_iam.PolicyStatement(
                    actions=['iam:PassRole'],
                    resources=[aes_siem_snapshot_role.role_arn]
                ),
            ]
        )

        aes_siem_deploy_role_for_lambda = aws_iam.Role(
            self, 'AesSiemDeployRoleForLambda',
            role_name='aes-siem-deploy-role-for-lambda',
            managed_policies=[
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    'AmazonOpenSearchServiceFullAccess'),
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    'service-role/AWSLambdaBasicExecutionRole'),
            ],
            inline_policies=[policydoc_assume_snapshrole, policydoc_snapshot,
                             policydoc_create_loggroup, policydoc_crhelper],
            assumed_by=aws_iam.ServicePrincipal('lambda.amazonaws.com')
        )

        if vpc_type:
            aes_siem_deploy_role_for_lambda.add_managed_policy(
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    'service-role/AWSLambdaVPCAccessExecutionRole')
            )

        # for alert from Amazon OpenSearch Service
        aes_siem_sns_role = aws_iam.Role(
            self, 'AesSiemSnsRole',
            role_name='aes-siem-sns-role',
            assumed_by=aws_iam.ServicePrincipal('es.amazonaws.com')
        )

        # EC2 role
        aes_siem_es_loader_ec2_role = aws_iam.Role(
            self, 'AesSiemEsLoaderEC2Role',
            role_name='aes-siem-es-loader-for-ec2',
            assumed_by=aws_iam.ServicePrincipal('ec2.amazonaws.com'),
        )

        aws_iam.CfnInstanceProfile(
            self, 'AesSiemEsLoaderEC2InstanceProfile',
            instance_profile_name=aes_siem_es_loader_ec2_role.role_name,
            roles=[aes_siem_es_loader_ec2_role.role_name]
        )

        ######################################################################
        # in VPC
        ######################################################################
        aes_role_exist = check_iam_role('/aws-service-role/es.amazonaws.com/')
        if vpc_type and not aes_role_exist:
            slr_aes = aws_iam.CfnServiceLinkedRole(
                self, 'AWSServiceRoleForAmazonOpenSearchService',
                aws_service_name='es.amazonaws.com',
                description='Created by cloudformation of siem stack'
            )
            slr_aes.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN

        ######################################################################
        # SQS for es-loader's DLQ
        ######################################################################
        sqs_aes_siem_dlq = aws_sqs.Queue(
            self, 'AesSiemDlq', queue_name='aes-siem-dlq',
            retention_period=core.Duration.days(14))

        sqs_aes_siem_splitted_logs = aws_sqs.Queue(
            self, 'AesSiemSqsSplitLogs',
            queue_name='aes-siem-sqs-splitted-logs',
            dead_letter_queue=aws_sqs.DeadLetterQueue(
                max_receive_count=2, queue=sqs_aes_siem_dlq),
            visibility_timeout=core.Duration.seconds(ES_LOADER_TIMEOUT),
            retention_period=core.Duration.days(14))

        ######################################################################
        # Setup Lambda
        ######################################################################
        # setup lambda of es_loader
        lambda_es_loader_vpc_kwargs = {}
        if vpc_type:
            lambda_es_loader_vpc_kwargs = {
                'security_group': sg_vpc_noinbound_aes_siem,
                'vpc': vpc_aes_siem,
                'vpc_subnets': vpc_subnets,
            }

        lambda_es_loader = aws_lambda.Function(
            self, 'LambdaEsLoader', **lambda_es_loader_vpc_kwargs,
            function_name='aes-siem-es-loader',
            description=f'{SOLUTION_NAME} / es-loader',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            architecture=aws_lambda.Architecture.X86_64,
            # architecture=region_mapping.find_in_map(
            #    core.Aws.REGION, 'LambdaArm'),
            # code=aws_lambda.Code.asset('../lambda/es_loader.zip'),
            code=aws_lambda.Code.asset('../lambda/es_loader'),
            handler='index.lambda_handler',
            memory_size=2048,
            timeout=core.Duration.seconds(ES_LOADER_TIMEOUT),
            reserved_concurrent_executions=(
                reserved_concurrency.value_as_number),
            dead_letter_queue_enabled=True,
            dead_letter_queue=sqs_aes_siem_dlq,
            environment={
                'GEOIP_BUCKET': s3bucket_name_geo, 'LOG_LEVEL': 'info',
                'POWERTOOLS_LOGGER_LOG_EVENT': 'false',
                'POWERTOOLS_SERVICE_NAME': 'es-loader',
                'POWERTOOLS_METRICS_NAMESPACE': 'SIEM'})
        es_loader_newver = lambda_es_loader.add_version(
            name=__version__, description=__version__)
        es_loader_opt = es_loader_newver.node.default_child.cfn_options
        es_loader_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

        # send only
        # sqs_aes_siem_dlq.grant(lambda_es_loader, 'sqs:SendMessage')
        # send and receive; receiving lets es-loader re-process DLQ messages in a loop
        sqs_aes_siem_dlq.grant(
            lambda_es_loader, 'sqs:SendMessage', 'sqs:ReceiveMessage',
            'sqs:DeleteMessage', 'sqs:GetQueueAttributes')

        sqs_aes_siem_splitted_logs.grant(
            lambda_es_loader, 'sqs:SendMessage', 'sqs:ReceiveMessage',
            'sqs:DeleteMessage', 'sqs:GetQueueAttributes')

        lambda_es_loader.add_event_source(
            aws_lambda_event_sources.SqsEventSource(
                sqs_aes_siem_splitted_logs, batch_size=1))

        # es-loader on EC2 role
        sqs_aes_siem_dlq.grant(
            aes_siem_es_loader_ec2_role, 'sqs:GetQueue*', 'sqs:ListQueues*',
            'sqs:ReceiveMessage*', 'sqs:DeleteMessage*')

        lambda_geo = aws_lambda.Function(
            self, 'LambdaGeoipDownloader',
            function_name='aes-siem-geoip-downloader',
            description=f'{SOLUTION_NAME} / geoip-downloader',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            architecture=aws_lambda.Architecture.X86_64,
            # architecture=region_mapping.find_in_map(
            #    core.Aws.REGION, 'LambdaArm'),
            code=aws_lambda.Code.asset('../lambda/geoip_downloader'),
            handler='index.lambda_handler',
            memory_size=320,
            timeout=core.Duration.seconds(300),
            environment={
                's3bucket_name': s3bucket_name_geo,
                'license_key': geoip_license_key.value_as_string,
            }
        )
        lambda_geo_newver = lambda_geo.add_version(
            name=__version__, description=__version__)
        lambda_geo_opt = lambda_geo_newver.node.default_child.cfn_options
        lambda_geo_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

        ######################################################################
        # setup OpenSearch Service
        ######################################################################
        lambda_deploy_es = aws_lambda.Function(
            self, 'LambdaDeployAES',
            function_name='aes-siem-deploy-aes',
            description=f'{SOLUTION_NAME} / opensearch domain deployment',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            architecture=aws_lambda.Architecture.X86_64,
            # architecture=region_mapping.find_in_map(
            #    core.Aws.REGION, 'LambdaArm'),
            # code=aws_lambda.Code.asset('../lambda/deploy_es.zip'),
            code=aws_lambda.Code.asset('../lambda/deploy_es'),
            handler='index.aes_domain_handler',
            memory_size=128,
            timeout=core.Duration.seconds(300),
            environment={
                'accountid': core.Aws.ACCOUNT_ID,
                'aes_domain_name': aes_domain_name,
                'aes_admin_role': aes_siem_deploy_role_for_lambda.role_arn,
                'es_loader_role': lambda_es_loader.role.role_arn,
                'allow_source_address': allow_source_address.value_as_string,
            },
            role=aes_siem_deploy_role_for_lambda,
        )
        lambda_deploy_es.add_environment(
            's3_snapshot', s3_snapshot.bucket_name)
        if vpc_type:
            lambda_deploy_es.add_environment(
                'vpc_subnet_id', subnet1.subnet_id)
            lambda_deploy_es.add_environment(
                'security_group_id', sg_vpc_aes_siem.security_group_id)
        else:
            lambda_deploy_es.add_environment('vpc_subnet_id', 'None')
            lambda_deploy_es.add_environment('security_group_id', 'None')
        deploy_es_newver = lambda_deploy_es.add_version(
            name=__version__, description=__version__)
        deploy_es_opt = deploy_es_newver.node.default_child.cfn_options
        deploy_es_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

        # execute lambda_deploy_es to deploy the Amazon ES domain
        aes_domain = aws_cloudformation.CfnCustomResource(
            self, 'AesSiemDomainDeployedR2',
            service_token=lambda_deploy_es.function_arn,)
        aes_domain.add_override('Properties.ConfigVersion', __version__)

        es_endpoint = aes_domain.get_att('es_endpoint').to_string()
        lambda_es_loader.add_environment('ES_ENDPOINT', es_endpoint)
        lambda_es_loader.add_environment(
            'SQS_SPLITTED_LOGS_URL', sqs_aes_siem_splitted_logs.queue_url)

        lambda_configure_es_vpc_kwargs = {}
        if vpc_type:
            lambda_configure_es_vpc_kwargs = {
                'security_group': sg_vpc_noinbound_aes_siem,
                'vpc': vpc_aes_siem,
                'vpc_subnets': aws_ec2.SubnetSelection(subnets=[subnet1, ]), }
        lambda_configure_es = aws_lambda.Function(
            self, 'LambdaConfigureAES', **lambda_configure_es_vpc_kwargs,
            function_name='aes-siem-configure-aes',
            description=f'{SOLUTION_NAME} / opensearch configuration',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            architecture=aws_lambda.Architecture.X86_64,
            # architecture=region_mapping.find_in_map(
            #    core.Aws.REGION, 'LambdaArm'),
            code=aws_lambda.Code.asset('../lambda/deploy_es'),
            handler='index.aes_config_handler',
            memory_size=128,
            timeout=core.Duration.seconds(300),
            environment={
                'accountid': core.Aws.ACCOUNT_ID,
                'aes_domain_name': aes_domain_name,
                'aes_admin_role': aes_siem_deploy_role_for_lambda.role_arn,
                'es_loader_role': lambda_es_loader.role.role_arn,
                'allow_source_address': allow_source_address.value_as_string,
                'es_endpoint': es_endpoint,
            },
            role=aes_siem_deploy_role_for_lambda,
        )
        lambda_configure_es.add_environment(
            's3_snapshot', s3_snapshot.bucket_name)
        if vpc_type:
            lambda_configure_es.add_environment(
                'vpc_subnet_id', subnet1.subnet_id)
            lambda_configure_es.add_environment(
                'security_group_id', sg_vpc_aes_siem.security_group_id)
        else:
            lambda_configure_es.add_environment('vpc_subnet_id', 'None')
            lambda_configure_es.add_environment('security_group_id', 'None')
        configure_es_newver = lambda_configure_es.add_version(
            name=__version__, description=__version__)
        configure_es_opt = configure_es_newver.node.default_child.cfn_options
        configure_es_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

        aes_config = aws_cloudformation.CfnCustomResource(
            self, 'AesSiemDomainConfiguredR2',
            service_token=lambda_configure_es.function_arn,)
        aes_config.add_override('Properties.ConfigVersion', __version__)
        aes_config.add_depends_on(aes_domain)
        aes_config.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN

        es_arn = (f'arn:aws:es:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}'
                  f':domain/{aes_domain_name}')
        # grant permission to es_loader role
        inline_policy_to_load_entries_into_es = aws_iam.Policy(
            self, 'aes-siem-policy-to-load-entries-to-es',
            policy_name='aes-siem-policy-to-load-entries-to-es',
            statements=[
                aws_iam.PolicyStatement(
                    actions=['es:*'],
                    resources=[es_arn + '/*', ]),
            ]
        )
        lambda_es_loader.role.attach_inline_policy(
            inline_policy_to_load_entries_into_es)
        aes_siem_es_loader_ec2_role.attach_inline_policy(
            inline_policy_to_load_entries_into_es)

        # grant additional permission to es_loader role
        additional_kms_cmks = self.node.try_get_context('additional_kms_cmks')
        if additional_kms_cmks:
            inline_policy_access_to_additional_cmks = aws_iam.Policy(
                self, 'access_to_additional_cmks',
                policy_name='access_to_additional_cmks',
                statements=[
                    aws_iam.PolicyStatement(
                        actions=['kms:Decrypt'],
                        resources=sorted(set(additional_kms_cmks))
                    )
                ]
            )
            lambda_es_loader.role.attach_inline_policy(
                inline_policy_access_to_additional_cmks)
            aes_siem_es_loader_ec2_role.attach_inline_policy(
                inline_policy_access_to_additional_cmks)
        additional_buckets = self.node.try_get_context('additional_s3_buckets')

        if additional_buckets:
            buckets_list = []
            for bucket in additional_buckets:
                buckets_list.append(f'arn:aws:s3:::{bucket}')
                buckets_list.append(f'arn:aws:s3:::{bucket}/*')
            inline_policy_access_to_additional_buckets = aws_iam.Policy(
                self, 'access_to_additional_buckets',
                policy_name='access_to_additional_buckets',
                statements=[
                    aws_iam.PolicyStatement(
                        actions=['s3:GetObject*', 's3:GetBucket*', 's3:List*'],
                        resources=sorted(set(buckets_list))
                    )
                ]
            )
            lambda_es_loader.role.attach_inline_policy(
                inline_policy_access_to_additional_buckets)
            aes_siem_es_loader_ec2_role.attach_inline_policy(
                inline_policy_access_to_additional_buckets)

        kms_aes_siem.grant_decrypt(lambda_es_loader)
        kms_aes_siem.grant_decrypt(aes_siem_es_loader_ec2_role)

        ######################################################################
        # s3 notification and grant permission
        ######################################################################
        s3_geo.grant_read_write(lambda_geo)
        s3_geo.grant_read(lambda_es_loader)
        s3_geo.grant_read(aes_siem_es_loader_ec2_role)
        s3_log.grant_read(lambda_es_loader)
        s3_log.grant_read(aes_siem_es_loader_ec2_role)

        # create s3 notification for es_loader
        notification = aws_s3_notifications.LambdaDestination(lambda_es_loader)

        # assign notification for the s3 PUT event type
        # most log systems use PUT, but CLB also uses POST & multipart upload
        s3_log.add_event_notification(
            aws_s3.EventType.OBJECT_CREATED, notification,
            aws_s3.NotificationKeyFilter(prefix='AWSLogs/'))

        # For user logs, not AWS logs
        s3_log.add_event_notification(
            aws_s3.EventType.OBJECT_CREATED, notification,
            aws_s3.NotificationKeyFilter(prefix='UserLogs/'))

        # Download geoip to S3 once by executing lambda_geo
        get_geodb = aws_cloudformation.CfnCustomResource(
            self, 'ExecLambdaGeoipDownloader',
            service_token=lambda_geo.function_arn,)
        get_geodb.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN

        # Download the GeoIP database every 12 hours
        rule = aws_events.Rule(
            self, 'CwlRuleLambdaGeoipDownloaderDilly',
            schedule=aws_events.Schedule.rate(core.Duration.hours(12)))
        rule.add_target(aws_events_targets.LambdaFunction(lambda_geo))

        ######################################################################
        # bucket policy
        ######################################################################
        s3_awspath = s3_log.bucket_arn + '/AWSLogs/' + core.Aws.ACCOUNT_ID
        bucket_policy_common1 = aws_iam.PolicyStatement(
            sid='ELB Policy',
            principals=[aws_iam.AccountPrincipal(
                account_id=region_mapping.find_in_map(
                    core.Aws.REGION, 'ElbV2AccountId'))],
            actions=['s3:PutObject'], resources=[s3_awspath + '/*'],)
        # NLB / ALB / R53resolver / VPC Flow Logs
        bucket_policy_elb1 = aws_iam.PolicyStatement(
            sid='AWSLogDeliveryAclCheck For ALB NLB R53Resolver Flowlogs',
            principals=[aws_iam.ServicePrincipal(
                'delivery.logs.amazonaws.com')],
            actions=['s3:GetBucketAcl', 's3:ListBucket'],
            resources=[s3_log.bucket_arn],)
        bucket_policy_elb2 = aws_iam.PolicyStatement(
            sid='AWSLogDeliveryWrite For ALB NLB R53Resolver Flowlogs',
            principals=[aws_iam.ServicePrincipal(
                'delivery.logs.amazonaws.com')],
            actions=['s3:PutObject'], resources=[s3_awspath + '/*'],
            conditions={
                'StringEquals': {'s3:x-amz-acl': 'bucket-owner-full-control'}})
        s3_log.add_to_resource_policy(bucket_policy_common1)
        s3_log.add_to_resource_policy(bucket_policy_elb1)
        s3_log.add_to_resource_policy(bucket_policy_elb2)

        # CloudTrail
        bucket_policy_trail1 = aws_iam.PolicyStatement(
            sid='AWSLogDeliveryAclCheck For Cloudtrail',
            principals=[aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')],
            actions=['s3:GetBucketAcl'], resources=[s3_log.bucket_arn],)
        bucket_policy_trail2 = aws_iam.PolicyStatement(
            sid='AWSLogDeliveryWrite For CloudTrail',
            principals=[aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')],
            actions=['s3:PutObject'], resources=[s3_awspath + '/*'],
            conditions={
                'StringEquals': {'s3:x-amz-acl': 'bucket-owner-full-control'}})
        s3_log.add_to_resource_policy(bucket_policy_trail1)
        s3_log.add_to_resource_policy(bucket_policy_trail2)

        # GuardDuty
        bucket_policy_gd1 = aws_iam.PolicyStatement(
            sid='Allow GuardDuty to use the getBucketLocation operation',
            principals=[aws_iam.ServicePrincipal('guardduty.amazonaws.com')],
            actions=['s3:GetBucketLocation'], resources=[s3_log.bucket_arn],)
        bucket_policy_gd2 = aws_iam.PolicyStatement(
            sid='Allow GuardDuty to upload objects to the bucket',
            principals=[aws_iam.ServicePrincipal('guardduty.amazonaws.com')],
            actions=['s3:PutObject'], resources=[s3_log.bucket_arn + '/*'],)
        bucket_policy_gd5 = aws_iam.PolicyStatement(
            sid='Deny non-HTTPS access', effect=aws_iam.Effect.DENY,
            actions=['s3:*'], resources=[s3_log.bucket_arn + '/*'],
            conditions={'Bool': {'aws:SecureTransport': 'false'}})
        bucket_policy_gd5.add_any_principal()
        s3_log.add_to_resource_policy(bucket_policy_gd1)
        s3_log.add_to_resource_policy(bucket_policy_gd2)
        s3_log.add_to_resource_policy(bucket_policy_gd5)

        # Config
        bucket_policy_config1 = aws_iam.PolicyStatement(
            sid='AWSConfig BucketPermissionsCheck and BucketExistenceCheck',
            principals=[aws_iam.ServicePrincipal('config.amazonaws.com')],
            actions=['s3:GetBucketAcl', 's3:ListBucket'],
            resources=[s3_log.bucket_arn],)
        bucket_policy_config2 = aws_iam.PolicyStatement(
            sid='AWSConfigBucketDelivery',
            principals=[aws_iam.ServicePrincipal('config.amazonaws.com')],
            actions=['s3:PutObject'], resources=[s3_awspath + '/Config/*'],
            conditions={
                'StringEquals': {'s3:x-amz-acl': 'bucket-owner-full-control'}})
        s3_log.add_to_resource_policy(bucket_policy_config1)
        s3_log.add_to_resource_policy(bucket_policy_config2)

        # geoip
        bucket_policy_geo1 = aws_iam.PolicyStatement(
            sid='Allow geoip downloader and es-loader to read/write',
            principals=[lambda_es_loader.role, lambda_geo.role],
            actions=['s3:PutObject', 's3:GetObject', 's3:DeleteObject'],
            resources=[s3_geo.bucket_arn + '/*'],)
        s3_geo.add_to_resource_policy(bucket_policy_geo1)

        # ES Snapshot
        bucket_policy_snapshot = aws_iam.PolicyStatement(
            sid='Allow ES to store snapshot',
            principals=[aes_siem_snapshot_role],
            actions=['s3:PutObject', 's3:GetObject', 's3:DeleteObject'],
            resources=[s3_snapshot.bucket_arn + '/*'],)
        s3_snapshot.add_to_resource_policy(bucket_policy_snapshot)

        ######################################################################
        # for multiaccount / organizations
        ######################################################################
        if org_id or no_org_ids:
            ##################################################################
            # KMS key policy for multiaccount / organizations
            ##################################################################
            # for CloudTrail
            cond_tail2 = self.make_resource_list(
                path='arn:aws:cloudtrail:*:', tail=':trail/*',
                keys=self.list_without_none(org_mgmt_id, no_org_ids))
            key_policy_mul_trail2 = aws_iam.PolicyStatement(
                sid=('Allow CloudTrail to encrypt logs for multiaccounts'),
                actions=['kms:GenerateDataKey*'],
                principals=[aws_iam.ServicePrincipal(
                    'cloudtrail.amazonaws.com')],
                resources=['*'],
                conditions={'StringLike': {
                    'kms:EncryptionContext:aws:cloudtrail:arn': cond_tail2}})
            kms_aes_siem.add_to_resource_policy(key_policy_mul_trail2)

            # for replication
            key_policy_rep1 = aws_iam.PolicyStatement(
                sid=('Enable cross account encrypt access for S3 Cross Region '
                     'Replication'),
                actions=['kms:Encrypt'],
                principals=self.make_account_principals(
                    org_mgmt_id, org_member_ids, no_org_ids),
                resources=['*'],)
            kms_aes_siem.add_to_resource_policy(key_policy_rep1)

            ##################################################################
            # Bucket policy for multiaccount / organizations
            ##################################################################
            s3_log_bucket_arn = 'arn:aws:s3:::' + s3bucket_name_log

            # for CloudTrail
            s3_mulpaths = self.make_resource_list(
                path=f'{s3_log_bucket_arn}/AWSLogs/', tail='/*',
                keys=self.list_without_none(org_id, org_mgmt_id, no_org_ids))
            bucket_policy_org_trail = aws_iam.PolicyStatement(
                sid='AWSCloudTrailWrite for Multiaccounts / Organizations',
                principals=[
                    aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')],
                actions=['s3:PutObject'], resources=s3_mulpaths,
                conditions={'StringEquals': {
                    's3:x-amz-acl': 'bucket-owner-full-control'}})
            s3_log.add_to_resource_policy(bucket_policy_org_trail)

            # config
            s3_conf_multpaths = self.make_resource_list(
                path=f'{s3_log_bucket_arn}/AWSLogs/', tail='/Config/*',
                keys=self.list_without_none(org_id, org_mgmt_id, no_org_ids))
            bucket_policy_mul_config2 = aws_iam.PolicyStatement(
                sid='AWSConfigBucketDelivery',
                principals=[aws_iam.ServicePrincipal('config.amazonaws.com')],
                actions=['s3:PutObject'], resources=s3_conf_multpaths,
                conditions={'StringEquals': {
                    's3:x-amz-acl': 'bucket-owner-full-control'}})
            s3_log.add_to_resource_policy(bucket_policy_mul_config2)

            # for replication
            bucket_policy_rep1 = aws_iam.PolicyStatement(
                sid='PolicyForDestinationBucket / Permissions on objects',
                principals=self.make_account_principals(
                    org_mgmt_id, org_member_ids, no_org_ids),
                actions=['s3:ReplicateDelete', 's3:ReplicateObject',
                         's3:ReplicateTags', 's3:GetObjectVersionTagging',
                         's3:ObjectOwnerOverrideToBucketOwner'],
                resources=[f'{s3_log_bucket_arn}/*'])
            bucket_policy_rep2 = aws_iam.PolicyStatement(
                sid='PolicyForDestinationBucket / Permissions on bucket',
                principals=self.make_account_principals(
                    org_mgmt_id, org_member_ids, no_org_ids),
                actions=['s3:List*', 's3:GetBucketVersioning',
                         's3:PutBucketVersioning'],
                resources=[f'{s3_log_bucket_arn}'])
            s3_log.add_to_resource_policy(bucket_policy_rep1)
            s3_log.add_to_resource_policy(bucket_policy_rep2)

        ######################################################################
        # SNS topic for Amazon OpenSearch Service Alert
        ######################################################################
        sns_topic = aws_sns.Topic(
            self, 'SnsTopic', topic_name='aes-siem-alert',
            display_name='AES SIEM')

        sns_topic.add_subscription(aws_sns_subscriptions.EmailSubscription(
            email_address=sns_email.value_as_string))
        sns_topic.grant_publish(aes_siem_sns_role)

        ######################################################################
        # output of CFn
        ######################################################################
        kibanaurl = f'https://{es_endpoint}/_dashboards/'
        kibanaadmin = aes_domain.get_att('kibanaadmin').to_string()
        kibanapass = aes_domain.get_att('kibanapass').to_string()

        core.CfnOutput(self, 'RoleDeploy', export_name='role-deploy',
                       value=aes_siem_deploy_role_for_lambda.role_arn)
        core.CfnOutput(self, 'DashboardsUrl', export_name='dashboards-url',
                       value=kibanaurl)
        core.CfnOutput(self, 'DashboardsPassword',
                       export_name='dashboards-pass', value=kibanapass,
                       description=('Please change the password in OpenSearch '
                                    'Dashboards ASAP'))
        core.CfnOutput(self, 'DashboardsAdminID',
                       export_name='dashboards-admin', value=kibanaadmin)
Exemplo n.º 30
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        pvt_bkt = s3.Bucket(
            self,
            "abacBucket",
            versioned=True,
            # encryption=s3.BucketEncryption.KMS_MANAGED,
            block_public_access=s3.BlockPublicAccess(block_public_policy=True),
            removal_policy=core.RemovalPolicy.DESTROY
            )

        pvt_bkt.add_to_resource_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                # actions=["s3:GetObject"],
                actions=["s3:*"],
                # resources=[pvt_bkt.arn_for_objects("file.txt")],
                resources=[pvt_bkt.arn_for_objects("*")],
                principals=[iam.AccountRootPrincipal()]
            )
        )
        # Create 3 Users: 1 Admin & 2 Normal Users

        # Let's generate a password for our user
        redRosy_new_pass = random_string_generator(
            self,
            "redRosyNewPasswordGenerator",
            Length=20
        )

        redRosy = iam.User(
            self,
            "redRosy",
            user_name="redRosy",
            password=core.SecretValue.plain_text(redRosy_new_pass.response)
        )

        blueBob_new_pass = random_string_generator(
            self,
            "blueBobNewPasswordGenerator",
            Length=20
        )

        blueBob = iam.User(
            self,
            "blueBob",
            user_name="blueBob",
            password=core.SecretValue.plain_text(blueBob_new_pass.response)
        )

        annoyingAdmin_new_pass = random_string_generator(
            self,
            "annoyingAdminNewPasswordGenerator",
            Length=20
        )

        annoyingAdmin = iam.User(
            self,
            "annoyingAdmin",
            user_name="annoyingAdmin",
            password=core.SecretValue.plain_text(annoyingAdmin_new_pass.response)
        )

        teamUnicornGrp = iam.Group(
            self,
            "teamUnicorn",
            group_name="teamUnicorn"
        )

        # Add Users To Group
        teamUnicornGrp.add_user(redRosy)
        teamUnicornGrp.add_user(blueBob)
        teamUnicornGrp.add_user(annoyingAdmin)

        # blueGrp1.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name("AmazonS3ReadOnlyAccess"))
        ##############################################
        # We need a custom resource to TAG IAM Users #
        ##############################################

        iamUserTaggerResp = iam_user_tagger(
            self, "iamTagger",
            message=[
                {"user":redRosy.user_name, "tags":[{'Key': 'teamName','Value':'teamUnicorn'},
                                                    {'Key': 'projectName','Value':'projectRed'}
                                                    ]
                },
                {"user":blueBob.user_name, "tags":[{'Key': 'teamName','Value':'teamUnicorn'},
                                                    {'Key': 'projectName','Value':'projectBlue'}
                                                    ]
                },
                {"user":annoyingAdmin.user_name, "tags":[{'Key': 'teamName','Value':'teamUnicorn'},
                                                    {'Key': 'teamAdmin','Value':'yes'}
                                                    ]
                }
            ]
        )

        """
        resource = MyCustomResource(
            self, "iamTagger",
            message=[
                {"user":redRosy.user_name, "tags":[{'Key': 'teamName','Value':'teamUnicorn'},
                                                    {'Key': 'projectName','Value':'projectRed'}
                                                    ]
                },
                {"user":blueBob.user_name, "tags":[{'Key': 'teamName','Value':'teamUnicorn'},
                                                    {'Key': 'projectName','Value':'projectBlue'}
                                                    ]
                },
                {"user":annoyingAdmin.user_name, "tags":[{'Key': 'teamName','Value':'teamUnicorn'},
                                                    {'Key': 'teamAdmin','Value':'yes'}
                                                    ]
                }
            ]
        )
        """

        # Let's create the IAM Role
        # Users belonging to this group will be able to assume this role
        accountId=core.Aws.ACCOUNT_ID
        teamUnicornProjectRedRole = iam.Role(
            self,
            'teamUnicornProjectRedRoleId',
            assumed_by=iam.AccountPrincipal(f"{accountId}"),
            role_name="teamUnicornProjectRedRole"
        )
        core.Tag.add(teamUnicornProjectRedRole, key="teamName",value="teamUnicorn")
        core.Tag.add(teamUnicornProjectRedRole, key="projectName",value="projectRed")

        teamUnicornProjectBlueRole = iam.Role(
            self,
            'teamUnicornProjectBlueRoleId',
            assumed_by=iam.AccountPrincipal(f"{accountId}"),
            role_name="teamUnicornProjectBlueRole"
        )
        core.Tag.add(teamUnicornProjectBlueRole, key="teamName",value="teamUnicorn")
        core.Tag.add(teamUnicornProjectBlueRole, key="projectName",value="projectBlue")

        teamUnicornTeamAdminRole = iam.Role(
            self,
            'teamUnicornTeamAdminRoleId',
            assumed_by=iam.AccountPrincipal(f"{accountId}"),
            role_name="teamUnicornTeamAdminRole"
        )
        core.Tag.add(teamUnicornTeamAdminRole, key="teamName",value="teamUnicorn")
        core.Tag.add(teamUnicornTeamAdminRole, key="teamAdmin",value="yes")

        # Allow Group to Assume Role
        grpStmt1=iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                resources=[f"arn:aws:iam::{accountId}:role/teamUnicornProject*"],
                actions=["sts:AssumeRole"],
                conditions={ "StringEquals": { "iam:ResourceTag/teamName": "${aws:PrincipalTag/teamName}",
                                               "iam:ResourceTag/projectName": "${aws:PrincipalTag/projectName}" 
                                            }
                        }
            )
        grpStmt1.sid="AllowGroupMembersToAssumeRoleMatchingTeamName"

        grpStmt2=iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                resources=[f"arn:aws:iam::{accountId}:role/teamUnicornTeamAdminRole"],
                actions=["sts:AssumeRole"],
                conditions={ "StringEquals": { "iam:ResourceTag/teamName": "${aws:PrincipalTag/teamName}",
                                               "iam:ResourceTag/teamAdmin": "yes"
                                            }
                        }
            )
        grpStmt2.sid="AllowTeamAdminToAssumeRoleMatchingTeamName"
        teamUnicornGrp.add_to_policy( grpStmt1 )
        teamUnicornGrp.add_to_policy( grpStmt2 )
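
        # Illustrative helper (an assumption for documentation purposes only; it is
        # never invoked by this stack). With grpStmt1 in place, a user tagged
        # teamName=teamUnicorn / projectName=projectRed can assume the matching
        # project role roughly like this; the session name is an arbitrary choice.
        def _sketch_assume_project_red_role(account_id: str):
            import boto3  # deferred import; not needed to synthesize the stack
            sts = boto3.client("sts")
            resp = sts.assume_role(
                RoleArn=f"arn:aws:iam::{account_id}:role/teamUnicornProjectRedRole",
                RoleSessionName="redRosyAbacDemo"
            )
            return resp["Credentials"]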

        # Add Permissions to the Roles (the same statements are attached to all three)
        roleStmt1=iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                resources=["*"],
                actions=["s3:ListAllMyBuckets", "s3:HeadBucket"]
            )
        roleStmt1.sid="AllowGroupToSeeBucketListInTheConsole"

        roleStmt2=iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                resources=[pvt_bkt.bucket_arn],
                actions=["s3:ListBucket","s3:ListBucketVersions"],
                # Below condition can be used to enable listing a particular prefix in another statement
                # conditions={ "StringEquals" : { "s3:prefix":[""], "s3:delimiter":["/"] } }
            )
        roleStmt2.sid="AllowRootLevelListingOfBucket"

        roleStmt3=iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                resources=[pvt_bkt.arn_for_objects("*")],
                actions=["s3:Get*","s3:DeleteObjectTagging"],
                conditions={ "StringEquals": { "s3:ExistingObjectTag/teamName" : "${aws:PrincipalTag/teamName}",
                                               "s3:ExistingObjectTag/projectName" : "${aws:PrincipalTag/projectName}" 
                                            }
                        }
            )
        roleStmt3.sid="ReadOnlyAccessToTeams"

        roleStmt4=iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                resources=[pvt_bkt.arn_for_objects("*")],
                actions=["s3:PutObject","s3:PutObjectTagging","s3:PutObjectVersionTagging"],
                conditions={ "StringEquals": { "s3:RequestObjectTag/teamName" : "${aws:PrincipalTag/teamName}",
                                               "s3:RequestObjectTag/projectName" : "${aws:PrincipalTag/projectName}" 
                                            }
                        }
            )
        roleStmt4.sid="WriteTaggedObjectOwnedByThem"
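
        # Illustrative helper (never invoked; the key and body below are assumptions)
        # showing a write that satisfies roleStmt4: the upload must carry tags that
        # match the caller's teamName / projectName principal tags.
        def _sketch_put_tagged_object(s3_client, bucket_name: str):
            s3_client.put_object(
                Bucket=bucket_name,
                Key="projectRed/sample.txt",
                Body=b"hello from projectRed",
                Tagging="teamName=teamUnicorn&projectName=projectRed"
            )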

        roleStmt5=iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                resources=[pvt_bkt.bucket_arn, pvt_bkt.arn_for_objects("*")],
                actions=["s3:*"],
                conditions={
                    "StringEquals": {
                        # condition keys cannot be policy variables; match on the caller's principal tag
                        "aws:PrincipalTag/teamAdmin": "yes"
                    }
                }
            )
        roleStmt5.sid="FullAccessToAdminsFromSameTeam"

        teamUnicornProjectRedRole.add_to_policy( roleStmt1 )
        teamUnicornProjectRedRole.add_to_policy( roleStmt2 )
        teamUnicornProjectRedRole.add_to_policy( roleStmt3 )
        teamUnicornProjectRedRole.add_to_policy( roleStmt4 )
        teamUnicornProjectRedRole.add_to_policy( roleStmt5 )

        # Add same permissions to projectBlueRole
        teamUnicornProjectBlueRole.add_to_policy( roleStmt1 )
        teamUnicornProjectBlueRole.add_to_policy( roleStmt2 )
        teamUnicornProjectBlueRole.add_to_policy( roleStmt3 )
        teamUnicornProjectBlueRole.add_to_policy( roleStmt4 )
        teamUnicornProjectBlueRole.add_to_policy( roleStmt5 )

        # Add same permissions to teamAdminRole
        teamUnicornTeamAdminRole.add_to_policy( roleStmt1 )
        teamUnicornTeamAdminRole.add_to_policy( roleStmt2 )
        teamUnicornTeamAdminRole.add_to_policy( roleStmt3 )
        teamUnicornTeamAdminRole.add_to_policy( roleStmt4 )
        teamUnicornTeamAdminRole.add_to_policy( roleStmt5 )


        ###########################################
        ################# OUTPUTS #################
        ###########################################

        output0 = core.CfnOutput(self,
            "SecuirtyAutomationFrom",
            value=f"{global_args.SOURCE_INFO}",
            description="To know more about this automation stack, check out our github page."
        )

        output1_r = core.CfnOutput(self,
            "User:redRosy",
            value=redRosy_new_pass.response,
            description=f"Red Rosy User Password"
        )
        output1_b = core.CfnOutput(self,
            "User:blueBob",
            value=blueBob_new_pass.response,
            description=f"Blue Bob User Password"
        )
        output1_a = core.CfnOutput(self,
            "User:annoyingAdmin",
            value=annoyingAdmin_new_pass.response,
            description=f"Annoying Admin User Password"
        )

        output2 = core.CfnOutput(self,
            "SecurePrivateBucket",
            value=(
                    f"https://console.aws.amazon.com/s3/buckets/"
                    f"{pvt_bkt.bucket_name}"
                ),
            description=f"S3 Bucket to Test ABAC"
        )

        output3 = core.CfnOutput(self,
            "Rosy-Assume-RedRole-Url",
            value=(
                    f"https://signin.aws.amazon.com/switchrole?roleName="
                    f"{teamUnicornProjectRedRole.role_name}"
                    f"&account="
                    f"{core.Aws.ACCOUNT_ID}"
                ),
            description=f"The URL for Rosy to assume teamRed Role"
        )


        output4 = core.CfnOutput(self,
            "blueBob-Assume-RedRole-Url",
            value=(
                    f"https://signin.aws.amazon.com/switchrole?roleName="
                    f"{teamUnicornProjectBlueRole.role_name}"
                    f"&account="
                    f"{core.Aws.ACCOUNT_ID}"
                ),
            description=f"The URL for Bob to assume teamBlue Role"
        )

        output5 = core.CfnOutput(self,
            "SampleS3UploadCommands",
            value=(
                    f"aws s3api put-object-tagging --bucket {pvt_bkt.bucket_name} --key YOUR-OBJECT --tagging 'TagSet=[{{Key=teamName,Value=teamUnicorn}},{{Key=projectName,Value=projectRed}}]'"
                ),
            description=f"Sample object tagging command for projectRed"
        )

        output10 = core.CfnOutput(self,
            "User-Login-Url",
            value=(
                    f"https://{core.Aws.ACCOUNT_ID}.signin.aws.amazon.com/console"
                ),
            description=f"The URL for Rosy to assume teamRed Role"
        )