示例#1
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Create customer-managed KMS keys (RDS, Redis, Lambda) with aliases.

        Each key has rotation enabled; every key ARN is surfaced via a
        CfnOutput so other stacks can import it.
        """
        super().__init__(scope, id, **kwargs)

        prj_name = self.node.try_get_context("project_name")
        env_name = self.node.try_get_context("env")

        # Key for RDS encryption; kept as an attribute for sibling stacks.
        self.kms_rds = kms.Key(self,"dbkey",
            enable_key_rotation=True,
            description="{}-{}-key-rds".format(prj_name,env_name)
        )
        self.kms_rds.add_alias(
            alias_name="alias/{}-{}-key-rds".format(prj_name,env_name)
        )

        # Key for Redis/ElastiCache encryption.
        kms_redis = kms.Key(self,"rediskey",
            enable_key_rotation=True,
            description="{}-{}-key-redis".format(prj_name,env_name)
        )
        kms_redis.add_alias(
            alias_name="alias/{}-{}-key-redis".format(prj_name,env_name)
        )

        # Key for Lambda environment-variable encryption.
        kms_lambda = kms.Key(self,"lambdakey",
            enable_key_rotation=True,
            description="{}-{}-key-lambda".format(prj_name,env_name)
        )
        kms_lambda.add_alias(
            alias_name="alias/{}-{}-key-lambda".format(prj_name,env_name)
        )

        # FIX: this output previously lacked an export_name, unlike the
        # lambda/redis outputs below; added for consistency so other stacks
        # can Fn::ImportValue the RDS key ARN too.
        core.CfnOutput(self,"rdskeyexport",
            value=self.kms_rds.key_arn,
            export_name="rds-kms-key"
        )

        core.CfnOutput(self,"lambdakeyexport",
            value=kms_lambda.key_arn,
            export_name="lambda-kms-key"
        )

        core.CfnOutput(self,"rediskeyexport",
            value=kms_redis.key_arn,
            export_name="redis-kms-key"
        )
        
    
        
        
        
            
示例#2
0
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        """Provision a CloudTrail trail encrypted with a dedicated KMS key."""
        super().__init__(scope, construct_id, **kwargs)

        # Customer-managed key used to encrypt the trail's log files.
        trail_key = kms.Key(self,
                            'TrailKey',
                            alias='TrailKey',
                            enable_key_rotation=True)

        # The trail itself, also shipping events to CloudWatch Logs.
        cloudtrail.Trail(self,
                         'trail',
                         trail_name='trail',
                         encryption_key=trail_key,
                         send_to_cloud_watch_logs=True)

        # Statement letting the CloudTrail service generate data keys with
        # this CMK, scoped via encryption context to trails in this account.
        key_policy = iam.PolicyStatement(
            principals=[iam.ServicePrincipal("cloudtrail.amazonaws.com")],
            actions=["kms:GenerateDataKey*"],
            resources=["*"],
            conditions={
                'StringLike': {
                    'kms:EncryptionContext:aws:cloudtrail:arn':
                    'arn:aws:cloudtrail:*:' + self.account + ':trail/*'
                }
            })

        # Attach the statement to the key's resource policy.
        trail_key.add_to_resource_policy(key_policy)
示例#3
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Create a KMS-encrypted Kinesis stream for payment events."""
        super().__init__(scope, id, **kwargs)

        # Key policy: full kms:* for this account plus the Lambda service.
        key_policy_doc = iam.PolicyDocument(statements=[
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=["kms:*"],
                resources=['*'],
                principals=[
                    iam.AccountPrincipal(account_id=self.account),
                    iam.ServicePrincipal(service="lambda.amazonaws.com"),
                ])
        ])

        # Retained on stack deletion so encrypted data stays recoverable.
        self._kinesis_key = kms.Key(self,
                                    "volumeKey",
                                    enable_key_rotation=True,
                                    policy=key_policy_doc,
                                    removal_policy=core.RemovalPolicy.RETAIN)

        # Single-shard stream, 24h retention, encrypted with the key above
        # (read back through the public `kinesis_key` property).
        self._kinesis_stream = kinesis.Stream(
            self,
            id,
            encryption_key=self.kinesis_key,
            retention_period=core.Duration.hours(24),
            shard_count=1,
            stream_name="PaymentStream")
    def __init__(self, scope: core.Construct, id: str, common_stack: CommonStack, **kwargs) -> None:
        """Stand up an SSE-KMS S3 bucket plus SSM parameters for testing."""
        super().__init__(scope, id, **kwargs)

        self._supported_in_region = self.is_service_supported_in_region()

        # Key policy granting the account root full control of the CMK.
        key_policy = iam.PolicyDocument(statements=[
            iam.PolicyStatement(
                actions=["kms:*"],
                effect=iam.Effect.ALLOW,
                resources=["*"],
                principals=[iam.AccountPrincipal(core.Aws.ACCOUNT_ID)],
            )
        ])

        # CMK backing the bucket's server-side encryption.
        sse_key = kms.Key(self, "s3SSETestkmsKey", policy=key_policy)

        # Bucket encrypted with the CMK above.
        sse_bucket = s3.Bucket(
            self, "s3TestBucket", encryption=s3.BucketEncryption.KMS, encryption_key=sse_key
        )

        # Publish key id, bucket name and region so test clients can find them.
        self._parameters_to_save = {
            "sse_kms_key_id": sse_key.key_id,
            "bucket_with_sse_kms_enabled": sse_bucket.bucket_name,
            "bucket_with_sse_kms_region": core.Aws.REGION,
        }
        self.save_parameters_in_parameter_store(platform=Platform.ANDROID)

        common_stack.add_to_common_role_policies(self)
示例#5
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Deliberately misconfigured resources.

        NOTE(review): several constructs below are insecure (public
        read/write bucket, world-open SSH) — presumably an intentional
        fixture for a security-scanning demo; confirm this stack is never
        deployed to a real account.
        """
        super().__init__(scope, id, **kwargs)

        vpc = ec2.Vpc(self,
                      'vpc1'
                      )

        # Public read/write bucket, deleted with the stack.
        bucket_name = 'my-cdk-bucket'
        s3.Bucket(self,
                  bucket_name,
                  bucket_name=bucket_name,
                  access_control=s3.BucketAccessControl.PUBLIC_READ_WRITE,
                  removal_policy=RemovalPolicy.DESTROY)

        # Unencrypted 8 GiB EBS volume.
        ec2.Volume(self, 'vol1', availability_zone='us-east-1a', size=core.Size.gibibytes(8))

        # Security group allowing SSH from anywhere.
        sg = ec2.SecurityGroup(self,
                               'sg1',
                               vpc=vpc)
        sg.add_ingress_rule(Peer.any_ipv4(), Port.tcp(22))

        # KMS key with default settings (no rotation configured).
        kms.Key(self, 'kms1')

        # Public-subnet Postgres instance; master_username is a redacted
        # placeholder in this sample.
        rds.DatabaseInstance(self,
                             'rds1',
                             engine=rds.DatabaseInstanceEngine.postgres(version=PostgresEngineVersion.VER_12),
                             master_username='******',
                             vpc=vpc,
                             vpc_placement=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC))
示例#6
0
    def __init__(
            self,
            app: core.App,
            construct_id: str,
            authorized_users: List[iam.User],
            secret_placeholders=None,
            predefined_secrets=None) -> None:
        '''
        Create the stack's KMS key and any requested secrets.

        :param authorized_users: Users authorized for 'main' access to all of the secrets both placeholders and predefined
        :param secret_placeholders: Secrets that will need to be updated at a later time
        :param predefined_secrets: Secrets that already have determined secret values
        '''

        super().__init__(app, construct_id)
        self.undefined_secrets = {}
        self.defined_secrets = {}
        self.authorized_users = authorized_users
        # CMK (aliased after the app node) encrypting every secret; destroyed
        # with the stack since the secrets go with it.
        self.kms_key = kms.Key(
            self,
            "KMSKey",
            alias=app.node.id,
            removal_policy=core.RemovalPolicy.DESTROY,
            trust_account_identities=True
        )
        # FIX: the original used `expr if cond else None` as bare statements;
        # plain `if` statements are the idiomatic form for side effects.
        if predefined_secrets:
            self.create_predefined_secrets(predefined_secrets)
        if secret_placeholders:
            self.create_placeholders(secret_placeholders)
 def GenerateKmsKey(self, ZachStreamName):
     """Create a rotation-enabled KMS key for the named Kinesis stream.

     Emits CfnOutputs for the key's ARN and id, aliases the key after the
     stream, and returns the key construct.
     """
     alias = ZachStreamName + "KmsKey"
     stream_key = kms.Key(self, id=alias,
                          description="KMS Key for kinesis " + ZachStreamName,
                          enabled=True, enable_key_rotation=True)
     stream_key.add_alias(alias)
     core.CfnOutput(self, alias + "ARN", value=stream_key.key_arn)
     core.CfnOutput(self, alias + "ID", value=stream_key.key_id)
     return stream_key
示例#8
0
    def __init__(self, scope: core.Construct, id: builtins.str,
                 landing_zone: IVpcLandingZone) -> None:
        """Core financial-services resources: private DNS zone, CMK,
        Ameritrade secret, website bucket, and FinSpace hook.

        :param landing_zone: landing zone supplying the VPC and zone name.
        """
        super().__init__(scope, id)
        self.__landing_zone = landing_zone

        # Setup DNS...
        # FIX: the original wrote 'trader.fsi'.format(landing_zone.zone_name
        # .lower()) — a no-op, since the literal has no '{}' placeholder, so
        # the argument was silently discarded. Removing the call leaves
        # behavior unchanged; if a per-zone subdomain was intended, the
        # template needs an explicit placeholder.
        self.trader_dns_zone = r53.PrivateHostedZone(
            self,
            'Trader',
            zone_name='trader.fsi',
            vpc=landing_zone.vpc,
            comment='HomeNet Financial Services Domain')

        # Create a key and delegate access to IAM...
        self.key = kms.Key(
            self,
            'Key',
            alias='homenet/fsi',
            enable_key_rotation=True,
            policy=iam.PolicyDocument(statements=[
                iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                    principals=[
                                        iam.AccountPrincipal(
                                            core.Stack.of(self).account)
                                    ],
                                    actions=['kms:*'],
                                    resources=['*'])
            ]))

        # Create central resources...
        # NOTE(review): `self.landing_zone` (no underscores) is presumably a
        # property defined elsewhere on this class wrapping __landing_zone —
        # confirm against the full class.
        self.tda_secret = sm.Secret(
            self,
            'AmeritradeSecrets',
            removal_policy=core.RemovalPolicy.DESTROY,
            secret_name='HomeNet-{}-Ameritrade-Secrets'.format(
                self.landing_zone.zone_name))

        self.bucket = s3.Bucket(self,
                                'Bucket',
                                bucket_name='homenet-{}.{}.trader.fsi'.format(
                                    self.landing_zone.zone_name,
                                    core.Stack.of(self).region).lower(),
                                versioned=True)

        # Alias the bucket's website endpoint inside the private zone.
        r53.ARecord(self,
                    'BucketAlias',
                    zone=self.trader_dns_zone,
                    record_name=self.bucket.bucket_domain_name,
                    target=r53.RecordTarget.from_alias(
                        dns_targets.BucketWebsiteTarget(self.bucket)))

        # self.fspace = space.CfnEnvironment(self,'Finspace',
        #   name='HomeNet-FsiCoreSvc',
        #   kms_key_id= self.key.key_id,
        #   description="HomeNet Financial Servicing Catalog")
        self.finspace = FinSpaceEnvironment()
        self.key.grant_admin(iam.ServicePrincipal(service='finspace'))
    def __init__(self, scope: core.Construct, id: str,
                 landing_zone: ILandingZone, **kwargs):
        """
    Configure the account backup strategy: an encrypted AWS Backup vault
    plus a default daily/weekly plan selecting resources tagged with the
    landing-zone name.  (The original docstring said "Configure Dns
    Resolver", which did not match this code.)
    """
        super().__init__(scope, id, **kwargs)

        region = core.Stack.of(self).region

        # CMK protecting the backup vault contents.
        self.encryption_key = kms.Key(
            self,
            'EncryptionKey',
            description='Encryption Key for BackupStrategy')

        # Vault notification topic and the role AWS Backup assumes.
        self.topic = sns.Topic(self, 'Topic')
        self.role = iam.Role(self,
                             'Role',
                             description='Account Backup Role',
                             assumed_by=iam.ServicePrincipal(service='backup'))

        # Vault accepting cross-vault copies only from the role above.
        self.vault = backup.BackupVault(
            self,
            'Vault',
            encryption_key=self.encryption_key,
            notification_topic=self.topic,
            backup_vault_name='{}-Backup-Vault'.format(landing_zone.zone_name),
            access_policy=iam.PolicyDocument(statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                    actions=['backup:CopyIntoBackupVault'],
                    principals=[iam.ArnPrincipal(arn=self.role.role_arn)])
            ]))

        # Daily + weekly rules in the vault above.
        self.default_plan = backup.BackupPlan(
            self,
            'DefaultPlan',
            backup_vault=self.vault,
            backup_plan_name='Default Plan {} in {}'.format(
                landing_zone.zone_name, region),
            backup_plan_rules=[
                backup.BackupPlanRule.daily(),
                backup.BackupPlanRule.weekly(),
            ])

        # Back up everything tagged landing_zone=<zone name>.
        self.default_plan.add_selection('SelectionPolicy',
                                        allow_restores=True,
                                        role=self.role,
                                        resources=[
                                            backup.BackupResource.from_tag(
                                                "landing_zone",
                                                landing_zone.zone_name),
                                        ])
    def __init__(self, scope: Construct, cid: str, **kwargs) -> None:
        """Create the Swift connectivity customer-managed KMS key."""
        super().__init__(scope, id=cid, **kwargs)

        # Aliased CMK with rotation enabled; removed together with the stack.
        cmk_name = "SwiftConnectivityCMK"
        self._cmk = _kms.Key(
            self,
            cmk_name,
            alias=cmk_name,
            description="Swift Connectivity CMK for use for all resources",
            enabled=True,
            enable_key_rotation=True,
            removal_policy=RemovalPolicy.DESTROY)
示例#11
0
  def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Create the CI/CD artifact-encryption key and publish its ARN to SSM."""
    super().__init__(scope, id, **kwargs)

    # Rotation-enabled CMK trusted by account identities; used by the
    # FinSurf build system to encrypt build artifacts.
    self.build_key = kms.Key(
      self, 'ArtifactKey',
      alias='finsurf/cicd',
      trust_account_identities=True,
      description='Encryption key for FinSurf build system',
      enable_key_rotation=True)

    # Publish the key ARN so other stacks and tools can look it up.
    ssm.StringParameter(
      self, 'ArtifactKeyParameter',
      parameter_name='/app-FinSurf/artifacts/encryption_key_arn',
      string_value=self.build_key.key_arn,
      description='The active key for encrypting FinSurf artifacts')
示例#12
0
    def __init__(self, scope: core.Construct, id: str, vpc: ec2.IVpc,
                 **kwargs) -> None:
        """Private EKS cluster with KMS-encrypted Kubernetes secrets.

        :param vpc: VPC hosting the cluster; nodes target the 'Kubes'
            subnet group.
        """
        super().__init__(scope, id, **kwargs)

        # CMK used for envelope encryption of Kubernetes secrets.
        self.encryptionKey = kms.Key(self, 'KubeClusterKey')
        # default_capacity=0: node groups are presumably added elsewhere —
        # TODO confirm. API endpoint is private-only.
        self.cluster = eks.Cluster(
            self,
            'KubeCluster',
            version=eks.KubernetesVersion.V1_18,
            vpc=vpc,
            default_capacity=0,
            secrets_encryption_key=self.encryptionKey,
            endpoint_access=eks.EndpointAccess.PRIVATE,
            vpc_subnets=[ec2.SubnetSelection(subnet_group_name='Kubes')])
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        """KMS exercise stack: create a key and grant DevAdmin rights on it."""
        super().__init__(scope, construct_id, **kwargs)

        # Aliased exercise key.
        exercise_key = kms.Key(self,
                               'kms-key',
                               alias='devassoc-key',
                               description='Dev Cert Exercise key')
        # Look up the existing DevAdmin user; grant key admin + crypto use.
        dev_admin = iam.User.from_user_name(self, 'admin-user', 'DevAdmin')
        exercise_key.grant_admin(dev_admin)
        exercise_key.grant_encrypt_decrypt(dev_admin)
        self.key_id = exercise_key.key_id

        # Surface the key identifiers as stack outputs.
        core.CfnOutput(self, 'kms-key-id', value=exercise_key.key_id)
        core.CfnOutput(self, 'kms-key-arn', value=exercise_key.key_arn)
    def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc,
                 **kwargs) -> None:
        """Encrypted Redshift warehouse with a CMK and an audit-log bucket."""
        super().__init__(scope, id, **kwargs)

        # Key policy: full kms:* for this account.
        kms_policy = iam.PolicyDocument()
        kms_policy.add_statements(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=["kms:*"],
                resources=['*'],
                principals=[iam.AccountPrincipal(account_id=self.account)]))

        # Cluster/volume encryption key; retained so data remains readable
        # after stack deletion.
        redshift_key = kms.Key(self,
                               "volumeKey",
                               enable_key_rotation=True,
                               policy=kms_policy,
                               removal_policy=core.RemovalPolicy.RETAIN)

        # Audit-log bucket. NOTE(review): the principal below hardcodes an
        # account id; consider parameterizing it before reuse.
        redshift_bucket = s3.Bucket(self, "redshiftBucket")
        redshift_bucket.add_to_resource_policy(permission=iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=["s3:*"],
            resources=[
                f"{redshift_bucket.bucket_arn}/*", redshift_bucket.bucket_arn
            ],
            principals=[
                # FIX: dropped a useless f-prefix from this placeholder-free
                # literal (behavior unchanged).
                iam.ArnPrincipal("arn:aws:iam::193672423079:user/logs")
            ]))

        # Two-node encrypted cluster logging to the bucket above; the
        # master_username is a redacted placeholder in this sample.
        self._cluster = redshift.Cluster(
            self,
            id,
            master_user=redshift.Login(master_username="******",
                                       encryption_key=redshift_key),
            port=5439,
            vpc=vpc,
            cluster_name="dwh",
            cluster_type=redshift.ClusterType.MULTI_NODE,
            number_of_nodes=2,
            default_database_name="aml",
            encrypted=True,
            encryption_key=redshift_key,
            logging_bucket=redshift_bucket,
            logging_key_prefix="dwh",
            node_type=redshift.NodeType.DC2_LARGE,
            removal_policy=core.RemovalPolicy.DESTROY,
            security_groups=[self.redshift_sg(vpc)],
            vpc_subnets=ec2.SubnetSelection(subnet_group_name="DBS"))
示例#15
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """RDS encryption key plus an SSM parameter exposing its id."""
        super().__init__(scope, id, **kwargs)

        project = self.node.try_get_context("project_name")
        environment = self.node.try_get_context("env")

        # Rotation-enabled CMK for RDS, aliased by project name and kept as
        # an attribute so sibling stacks can reference it.
        self.kms_rds = kms.Key(
            self,
            'rdskey',
            description=f"{project}-key-rds",
            enable_key_rotation=True)
        self.kms_rds.add_alias(alias_name=f'alias/{project}-key-rds')

        # Create SSM parameter holding the key id for downstream stacks.
        ssm.StringParameter(
            self,
            'rdskey-param',
            string_value=self.kms_rds.key_id,
            parameter_name='/' + environment + '/rds-kms-key')
示例#16
0
    def __init__(self, scope: core.Construct, id: str, **kwargs):
        """RDS encryption key and an SSM parameter publishing its id."""
        super().__init__(scope, id, **kwargs)

        prj_name = self.node.try_get_context('project_name')
        env_name = self.node.try_get_context('env')

        # Rotation-enabled CMK for RDS, exposed for sibling stacks.
        self.kms_rds = kms.Key(self,
                               'rds-key',
                               description=f'{prj_name}-key-rds',
                               enable_key_rotation=True)
        self.kms_rds.add_alias(alias_name=f'alias/{prj_name}-key-rds')

        # FIX: the parameter was bound to an unused local (`kms_param`); CDK
        # constructs register themselves with the scope, so no binding is
        # needed.
        ssm.StringParameter(
            self,
            'kms-param',
            string_value=self.kms_rds.key_id,
            parameter_name=f'/{env_name}/rds-kms-keyId')
示例#17
0
    def __init__(self, scope: core.Construct, id: str, vpc, instance_type,
                 managed_worker_nodes_nubmer, cluster_name,
                 unmanaged_worker_nodes_number, spot_price, key_pair,
                 **kwargs) -> None:
        """EKS cluster with optional spot (unmanaged) worker capacity.

        :param managed_worker_nodes_nubmer: managed node count (parameter
            name typo preserved — callers may pass it by keyword).
        :param unmanaged_worker_nodes_number: spot ASG capacity; 0 disables
            the unmanaged group entirely.
        """
        super().__init__(scope, id, **kwargs)
        #Create key pair that will be used for the K8S worker nodes
        self.key = KeyPair(
            self,
            "EKSKey",
            name=key_pair,
            description="This is a Key Pair for EKS worker nodes")
        #Create KMS key for secrets encryption
        self.kms_eks = kms.Key(
            self,
            'kms_eks',
            alias='kms_eks',
        )
        #Get the IAM role which will be added to the aws_auth
        # masters_role = iam.Role.from_role_arn(
        #     self, 'MasterRole',
        #     role_arn = masters_role
        # )

        #Create EKS cluster with managed/unmanaged worker nodes
        self.eks_cluster = eks.Cluster(
            self,
            'eks',
            cluster_name=cluster_name,
            version=eks.KubernetesVersion.V1_18,
            # masters_role = masters_role,
            default_capacity=managed_worker_nodes_nubmer,
            secrets_encryption_key=self.kms_eks,
            vpc=vpc)
        # Allow the cluster role to describe VPCs.
        self.eks_role = self.eks_cluster.node.try_find_child('Role')
        self.eks_role.add_to_policy(statement=iam.PolicyStatement(
            actions=["ec2:DescribeVpcs"], resources=["*"]))
        if unmanaged_worker_nodes_number > 0:
            self.asg = self.eks_cluster.add_auto_scaling_group_capacity(
                "EKSAutoScalingGroup",
                instance_type=ec2.InstanceType(instance_type),
                spot_price=spot_price,
                desired_capacity=unmanaged_worker_nodes_number,
                key_name=self.key.name)
            # FIX: this policy attachment previously sat outside the guard
            # above, so it raised AttributeError whenever
            # unmanaged_worker_nodes_number == 0 (self.asg never assigned).
            # It now runs only when the ASG exists.
            self.asg.add_to_role_policy(
                iam.PolicyStatement(actions=["route53:*"], resources=["*"]))
 def _build_kms_key_for_env(self) -> None:
     """Create the environment CMK, administered by the account root.

     Additional admin ARNs can be appended to `admin_arns` below.
     """
     admin_arns: List[str] = []  # A place to add other admins if needed for KMS
     principals = [iam.ArnPrincipal(arn) for arn in admin_arns]
     principals.append(
         iam.ArnPrincipal(f"arn:aws:iam::{self.context.account_id}:root"))
     composite_admin = iam.CompositePrincipal(*principals)
     # Retained on deletion; rotation enabled; admins get full kms:*.
     self.env_kms_key: kms.Key = kms.Key(
         self,
         id="kms-key",
         removal_policy=core.RemovalPolicy.RETAIN,
         enabled=True,
         enable_key_rotation=True,
         policy=iam.PolicyDocument(statements=[
             iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                 actions=["kms:*"],
                                 resources=["*"],
                                 principals=[composite_admin])
         ]),
     )
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """KMS-encrypted S3 bucket that notifies a KMS-encrypted SQS queue
        on object creation under the 'home/' prefix.
        """
        super().__init__(scope, id, **kwargs)

        #Create Role
        # NOTE(review): assigning via `S3SqsKmsSampleStack.__Role` inside the
        # class triggers name mangling (_S3SqsKmsSampleStack__Role); confirm
        # readers of this attribute use the same mangled form.
        S3SqsKmsSampleStack.__Role = IamService.create_role(self)

        #get KMS policy Document
        kms_policy_document = IamService.get_kms_policy_documents(self)

        # Shared CMK for both the bucket and the queue, rotation enabled.
        kms_key = kms.Key(self,
                          id='ssl_s3_sqs_kms_key',
                          alias='sslS3SqsKmsKey',
                          description='This is kms key',
                          enabled=True,
                          enable_key_rotation=True,
                          policy=kms_policy_document)

        #This will create the s3 bucket in AWS
        bucket = s3.Bucket(self,
                           "ssl_s3_bucket_raw_kms",
                           bucket_name="ssl-s3-bucket-kms-raw",
                           encryption=s3.BucketEncryption.KMS,
                           encryption_key=kms_key)

        #This will create the sqs in AWS
        queue = sqs.Queue(self,
                          "ssl_sqs_event_queue",
                          queue_name="ssl-sqs-kms-event-queue",
                          encryption=sqs.QueueEncryption.KMS,
                          encryption_master_key=kms_key)

        #queue.node.add_dependency(kms_key)
        # Ensure the queue and key exist before the bucket (notification
        # wiring requires the target queue to be deployable first).
        bucket.node.add_dependency(queue, kms_key)
        # #Create S3 notification object which points to SQS.
        notification = aws_s3_notifications.SqsDestination(queue)
        filter1 = s3.NotificationKeyFilter(prefix="home/")

        # #Attach notificaton event to S3 bucket.

        bucket.add_event_notification(s3.EventType.OBJECT_CREATED,
                                      notification, filter1)
示例#20
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Intentionally misconfigured demo resources.

        NOTE(review): the bucket is public read/write and SSH is open to the
        world — presumably a deliberate fixture for security tooling;
        confirm this stack is never deployed to a real account.
        """
        super().__init__(scope, id, **kwargs)

        vpc = ec2.Vpc(self, 'vpc1')

        # Public read/write bucket, deleted with the stack.
        bucket_name = 'my-cdk-bucket'
        s3.Bucket(self,
                  bucket_name,
                  bucket_name=bucket_name,
                  access_control=s3.BucketAccessControl.PUBLIC_READ_WRITE,
                  removal_policy=RemovalPolicy.DESTROY)

        # Unencrypted 8 GiB EBS volume.
        ec2.Volume(self,
                   'vol1',
                   availability_zone='us-east-1a',
                   size=core.Size.gibibytes(8))

        # Security group allowing SSH from anywhere.
        sg = ec2.SecurityGroup(self, 'sg1', vpc=vpc)
        sg.add_ingress_rule(Peer.any_ipv4(), Port.tcp(22))

        # KMS key with default settings (no rotation configured).
        kms.Key(self, 'kms1')
示例#21
0
    def __init__(self, app: core.App, id: str, *args, **kwargs) -> None:
        """CloudTrail prerequisites: CMK, encrypted bucket, SNS topic."""
        super().__init__(app, id, *args, **kwargs)

        # Rotation-enabled key for trail log encryption.
        trail_key = kms.Key(
            self,
            'CloudTrailKey',
            description='cdk-cloudtrail',
            enable_key_rotation=True,
        )

        # Bucket for trail delivery, encrypted with the key above.
        s3.Bucket(
            self,
            'CloudtrailBucket',
            encryption=s3.BucketEncryption.KMS,
            encryption_key=trail_key,
        )

        # Notification topic for trail events.
        sns.Topic(
            self,
            'CloudTrailTopic',
            display_name='cdk-cloudtrail',
        )
示例#22
0
文件: app.py 项目: kyhau/aws-tools
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """KMS-encrypted, non-public S3 bucket with explicit key-admin and
        key-use policies, plus read/write grants for configured roles.

        Expects `kwargs["env"]` to be a dict with keys: key_admin_arn,
        app_name, bucket_name, rw_role_arns — TODO confirm full schema
        against callers.
        """
        super().__init__(scope, id, **kwargs)

        env = kwargs.get("env")
        # Existing role that will administer (and here also use) the key.
        key_admin_role = aws_iam.Role.from_role_arn(self, "key_admin_role",
                                                    env["key_admin_arn"])

        key = aws_kms.Key(self,
                          f'{env["app_name"]}KmsKey',
                          enable_key_rotation=True)

        # NOTE(review): `alias_name` is not defined in this method —
        # presumably a module-level constant; verify it exists.
        key.add_alias(alias_name)

        # Allow administration of the key
        key.add_to_resource_policy(
            aws_iam.PolicyStatement(actions=[
                "kms:Create*",
                "kms:Describe*",
                "kms:Enable*",
                "kms:List*",
                "kms:Put*",
                "kms:Update*",
                "kms:Revoke*",
                "kms:Disable*",
                "kms:Get*",
                "kms:Delete*",
                "kms:ScheduleKeyDeletion",
                "kms:CancelKeyDeletion",
                "kms:GenerateDataKey",
            ],
                                    principals=[key_admin_role],
                                    resources=["*"]))

        # Allow use of the key
        key.add_to_resource_policy(
            aws_iam.PolicyStatement(actions=[
                "kms:Decrypt",
                "kms:DescribeKey",
                "kms:Encrypt",
                "kms:GenerateDataKey*",
                "kms:ReEncrypt*",
            ],
                                    principals=[key_admin_role],
                                    resources=["*"]))

        # Fully private bucket encrypted with the CMK; retained on delete.
        bucket = aws_s3.Bucket(
            self,
            f'{env["app_name"]}Bucket',
            bucket_name=env["bucket_name"],
            block_public_access=aws_s3.BlockPublicAccess.BLOCK_ALL,
            encryption=aws_s3.BucketEncryption.KMS,
            encryption_key=key,
            removal_policy=core.RemovalPolicy.RETAIN,
        )

        # Grant read/write to each configured role (construct id is the
        # role-name tail of the ARN).
        for rw_role_arn in env["rw_role_arns"]:
            rw_role = aws_iam.Role.from_role_arn(self,
                                                 rw_role_arn.split("/")[-1],
                                                 rw_role_arn)
            bucket.grant_read_write(rw_role)

        core.CfnOutput(self, "BucketName", value=bucket.bucket_name)

        core.CfnOutput(
            self,
            "KeyAliasArn",
            value=core.Fn.sub("arn:aws:kms:${AWS::Region}:${AWS::AccountId}:" +
                              alias_name))
示例#23
0
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 *,
                 git_token_key="",
                 github_owner="",
                 github_repo="",
                 github_branch="",
                 **kwargs) -> None:
        """GitHub-sourced CodePipeline: Source → Staging → Approval →
        Production, with a KMS-encrypted artifact bucket.

        :param git_token_key: Secrets Manager secret id holding the GitHub
            OAuth token.
        :param github_owner: repository owner.
        :param github_repo: repository name.
        :param github_branch: branch to track.
        """
        super().__init__(scope, id, **kwargs)

        # Build role. NOTE(review): AdministratorAccess on the build role is
        # very broad — presumably for CDK deploys; consider narrowing.
        role = iam.Role(
            self,
            "Role",
            assumed_by=iam.ServicePrincipal("codebuild.amazonaws.com"))

        role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AdministratorAccess"))

        # Shared CodeBuild project (buildspec from the repo) reused by both
        # the staging and production actions; behavior differs via ENV.
        cdk_project = codebuild.PipelineProject(
            self,
            "Codebuild",
            build_spec=codebuild.BuildSpec.from_source_filename(
                "codebuild/buildspec.yaml"),
            cache=codebuild.Cache.bucket(s3.Bucket(self, "Bucket")),
            environment=codebuild.BuildEnvironment(
                build_image=codebuild.LinuxBuildImage.STANDARD_2_0,
                privileged=True),
            role=role)

        source_output = codepipeline.Artifact()
        staging_output = codepipeline.Artifact()
        production_output = codepipeline.Artifact()

        # Pull source from GitHub using the stored OAuth token.
        source_action = codepipeline_actions.GitHubSourceAction(
            action_name="GitHub_Source",
            owner=github_owner,
            repo=github_repo,
            branch=github_branch,
            oauth_token=core.SecretValue.secrets_manager(git_token_key),
            output=source_output)

        # Deploy to staging (ENV=stg).
        staging_action = codepipeline_actions.CodeBuildAction(
            action_name="Deliver",
            project=cdk_project,
            input=source_output,
            outputs=[staging_output],
            environment_variables={"ENV": {
                "value": "stg"
            }})

        # Human gate between staging and production.
        manual_approval_action = codepipeline_actions.ManualApprovalAction(
            action_name="Approve")

        # Deploy to production (ENV=prd).
        production_action = codepipeline_actions.CodeBuildAction(
            action_name="Deliver",
            project=cdk_project,
            input=source_output,
            outputs=[production_output],
            environment_variables={"ENV": {
                "value": "prd"
            }})

        # KMS-encrypted artifact bucket backing the pipeline.
        key = kms.Key(self, "key")
        bucket = s3.Bucket(self, "bucket_artifacts", encryption_key=key)
        pipeline = codepipeline.Pipeline(self,
                                         "Pipeline",
                                         artifact_bucket=bucket)
        pipeline.add_stage(stage_name="Source", actions=[source_action])
        pipeline.add_stage(stage_name="Staging", actions=[staging_action])
        pipeline.add_stage(stage_name="Approval",
                           actions=[manual_approval_action])
        pipeline.add_stage(stage_name="Production",
                           actions=[production_action])
示例#24
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Demo of enforcing server-side encryption on S3 via bucket
        policies (SSE-S3 and SSE-KMS variants), plus a Lambda to exercise
        the policies with both a matching and a non-matching KMS key.
        """
        super().__init__(scope, id, **kwargs)

        # CMK the SSE-KMS bucket policy requires for uploads.
        kms_for_s3 = kms.Key(
            self,
            "kms-for-s3",
            description="Encryption key for the KMS encrypted S3 bucket",
            removal_policy=core.RemovalPolicy.DESTROY  # We don't want this to stick around after the demo
        )

        # A second key, used to prove uploads with the WRONG key are denied.
        different_kms_key = kms.Key(
            self,
            "different-kms-key",
            description="Another KMS Key",
            removal_policy=core.RemovalPolicy.DESTROY  # We don't want this to stick around after the demo
        )

        bucket_with_sse_s3 = s3.Bucket(
            self,
            "bucket-with-sse-s3",
            encryption=s3.BucketEncryption.S3_MANAGED
        )

        # Deny PutObject unless the request specifies SSE with AES256.
        bucket_with_sse_s3.add_to_resource_policy(
            iam.PolicyStatement(
                effect=iam.Effect.DENY,
                actions=["s3:PutObject"],
                conditions={
                    "Null": {
                        "s3:x-amz-server-side-encryption": "false"
                    },
                    "StringNotEqualsIfExists": {
                        "s3:x-amz-server-side-encryption": "AES256"
                    }
                },
                principals=[iam.AnyPrincipal()],
                resources=[
                    bucket_with_sse_s3.arn_for_objects("*")
                ]
            )
        )

        bucket_with_sse_kms = s3.Bucket(
            self,
            "bucket-with-sse-kms",
            encryption=s3.BucketEncryption.KMS,
            encryption_key=kms_for_s3
        )

        # Deny PutObject unless the request specifies SSE with aws:kms.
        bucket_with_sse_kms.add_to_resource_policy(
            iam.PolicyStatement(
                effect=iam.Effect.DENY,
                actions=["s3:PutObject"],
                conditions={
                    "Null": {
                        "s3:x-amz-server-side-encryption": "false"
                    },
                    "StringNotEqualsIfExists": {
                        "s3:x-amz-server-side-encryption": "aws:kms"
                    }
                },
                principals=[iam.AnyPrincipal()],
                resources=[
                    bucket_with_sse_kms.arn_for_objects("*")
                ]
            )
        )

        # Additionally deny PutObject with any KMS key other than kms_for_s3.
        bucket_with_sse_kms.add_to_resource_policy(
            iam.PolicyStatement(
                effect=iam.Effect.DENY,
                actions=["s3:PutObject"],
                conditions={
                    "StringNotEqualsIfExists": {
                        "s3:x-amz-server-side-encryption-aws-kms-key-id": kms_for_s3.key_arn
                    }
                },
                principals=[iam.AnyPrincipal()],
                resources=[
                    bucket_with_sse_kms.arn_for_objects("*")
                ]
            )
        )

        # Test harness Lambda; bucket/key identifiers are passed via env vars.
        encryption_test_function = _lambda.Function(
            self,
            "encryption-test-function",
            code=_lambda.Code.from_asset(
                path=os.path.join(os.path.dirname(__file__), "..", "encryption_test")
            ),
            handler="handler.lambda_handler",
            runtime=_lambda.Runtime.PYTHON_3_8,
            environment={
                "SSE_S3_BUCKET": bucket_with_sse_s3.bucket_name,
                "SSE_KMS_BUCKET": bucket_with_sse_kms.bucket_name,
                "KMS_FOR_S3": kms_for_s3.key_id,
                "DIFFERENT_KMS": different_kms_key.key_id,
            },
            timeout=core.Duration.seconds(15)
        )
        # Grant the function use of both keys and both buckets so the only
        # failures it observes come from the bucket policies above.
        kms_for_s3.grant_encrypt_decrypt(encryption_test_function)
        different_kms_key.grant_encrypt_decrypt(encryption_test_function)
        bucket_with_sse_kms.grant_read_write(encryption_test_function)
        bucket_with_sse_s3.grant_read_write(encryption_test_function)
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        """Alert by email when IAM Access Analyzer raises an ACTIVE finding
        for an IAM role.

        Wiring: EventBridge rule -> Lambda handler -> KMS-encrypted SNS
        topic with an email subscription.
        """
        super().__init__(scope, construct_id, **kwargs)

        # Email address that subscribes to the alert topic (CFN parameter).
        email_subscription_parameter = core.CfnParameter(
            self,
            "NotificationEmail",
            description="Email Address for Notification",
            type="String")
        email_subscription = email_subscription_parameter.value_as_string

        # Layer providing an up-to-date boto3, built from requirements.txt
        # by a helper defined elsewhere on this stack class.
        boto3_lambda_layer = self.create_dependencies_layer(
            id="boto3layer",
            requirements_path="./layers/boto3/requirements.txt",
            output_dir="./layers/boto3")

        # Customer-managed key so the SNS topic is encrypted at rest.
        cmk_key = kms.Key(
            self,
            "SNSEncryptionAtRestKey",
            description="SNS Encryption at rest key",
            alias="sns-encryption-at-rest",
            enable_key_rotation=True,
        )

        email_topic = sns.Topic(
            self,
            "AccessAnalyzerNotificationTopic",
            display_name="Access Analyzer Finding Notification Topic",
            master_key=cmk_key)
        email_topic.add_subscription(
            subscriptions.EmailSubscription(email_subscription))

        # Handler invoked by EventBridge for each matching finding event.
        access_analyzer_event_bridge_event_handler = aws_lambda.Function(
            self,
            "access_analyzer_event_bridge_event_handler",
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            handler="access_analyzer_event_bridge_target.lambda_handler",
            code=aws_lambda.AssetCode("./functions/"),
            environment={'SNS_TOPIC_ARN': email_topic.topic_arn},
            layers=[boto3_lambda_layer])

        # The handler inspects and can remediate role trust policies,
        # limited to roles in this account.
        handler_statement = iam.PolicyStatement(
            actions=["iam:GetRole", "iam:UpdateAssumeRolePolicy"],
            effect=iam.Effect.ALLOW,
            resources=[
                "arn:aws:iam::{}:role/*".format(core.Stack.of(self).account)
            ])
        access_analyzer_event_bridge_event_handler.add_to_role_policy(
            handler_statement)

        # Publishing is scoped to the single alert topic.
        notification_statement = iam.PolicyStatement(
            actions=[
                "sns:Publish",
            ],
            effect=iam.Effect.ALLOW,
            resources=[email_topic.topic_arn])

        access_analyzer_event_bridge_event_handler.add_to_role_policy(
            notification_statement)
        cmk_key.grant_encrypt_decrypt(
            access_analyzer_event_bridge_event_handler)

        access_analyzer_finding_rule = aws_events.Rule(
            self,
            "AccessAnalzyerFindingActiveEventRule",
            description="Access Analyzer Finding Event Active",
            enabled=True,
            event_pattern=aws_events.EventPattern(
                source=["aws.access-analyzer"],
                detail_type=["Access Analyzer Finding"],
                detail={
                    "status": ["ACTIVE"],
                    # Fixed: Access Analyzer resource types use the
                    # CloudFormation-style double-colon form; the previous
                    # "AWS::IAM:Role" (single colon) never matched any event.
                    "resourceType": ["AWS::IAM::Role"]
                }),
            targets=[
                aws_events_targets.LambdaFunction(
                    access_analyzer_event_bridge_event_handler)
            ])
# ----- Example #26 -----
# A simple EMR Profile that grants proper access to the Logs and Artifacts
# buckets. By default S3 Server Side encryption (SSE-S3) is enabled.
sse_s3_profile = emr_profile.EMRProfile(stack,
                                        'SSES3Profile',
                                        profile_name='sse-s3-profile',
                                        vpc=vpc,
                                        logs_bucket=logs_bucket,
                                        artifacts_bucket=artifacts_bucket)

# Allow the profile to read from and write to the data bucket (fluent API).
sse_s3_profile \
    .authorize_input_bucket(data_bucket) \
    .authorize_output_bucket(data_bucket)

# Here we create a KMS Key to use for At Rest Encryption in S3 and Locally
kms_key = kms.Key(stack, 'AtRestKMSKey')

# And a new profile to use the KMS Key
sse_kms_profile = emr_profile.EMRProfile(stack,
                                         'SSEKMSProfile',
                                         profile_name='sse-kms-profile',
                                         vpc=vpc,
                                         logs_bucket=logs_bucket,
                                         artifacts_bucket=artifacts_bucket)

# Authorize the profile for the Data Bucket and set the At Rest Encryption
# type. Fixed: the original ended this chain with a dangling trailing
# backslash, which continued the statement onto whatever line followed.
sse_kms_profile \
    .authorize_input_bucket(data_bucket) \
    .authorize_output_bucket(data_bucket) \
    .set_s3_encryption(emr_profile.S3EncryptionMode.SSE_KMS, encryption_key=kms_key) \
    .set_local_disk_encryption(kms_key, ebs_encryption=True)
# ----- Example #27 -----
    def __init__(self, scope: Construct, construct_id: str,
                 **kwargs: str) -> None:
        """Base infrastructure for the AWS Data Wrangler test suite.

        Provisions a VPC, a KMS key (with alias), a private versioned S3
        bucket, a Glue database, and CloudWatch Logs resources, then
        publishes the identifiers other stacks need as CloudFormation
        outputs (most of them exported cross-stack).
        """
        super().__init__(scope, construct_id, **kwargs)

        self.vpc = ec2.Vpc(
            self,
            "aws-data-wrangler-vpc",
            cidr="11.19.224.0/19",
            enable_dns_hostnames=True,
            enable_dns_support=True,
        )
        Tags.of(self.vpc).add("Name", "aws-data-wrangler")

        # The account root retains full administrative control of the key.
        admin_statement = iam.PolicyStatement(
            sid="Enable IAM User Permissions",
            effect=iam.Effect.ALLOW,
            actions=["kms:*"],
            principals=[iam.AccountRootPrincipal()],
            resources=["*"],
        )
        self.key = kms.Key(
            self,
            id="aws-data-wrangler-key",
            description="Aws Data Wrangler Test Key.",
            policy=iam.PolicyDocument(statements=[admin_statement]),
        )
        kms.Alias(
            self,
            "aws-data-wrangler-key-alias",
            alias_name="alias/aws-data-wrangler-key",
            target_key=self.key,
        )

        # Fully private, versioned bucket that expires test data daily.
        self.bucket = s3.Bucket(
            self,
            id="aws-data-wrangler",
            block_public_access=s3.BlockPublicAccess(
                block_public_acls=True,
                block_public_policy=True,
                ignore_public_acls=True,
                restrict_public_buckets=True,
            ),
            lifecycle_rules=[
                s3.LifecycleRule(
                    id="CleaningUp",
                    enabled=True,
                    expiration=Duration.days(1),
                    abort_incomplete_multipart_upload_after=Duration.days(1),
                ),
            ],
            versioned=True,
        )

        database = glue.Database(
            self,
            id="aws_data_wrangler_glue_database",
            database_name="aws_data_wrangler",
            location_uri=f"s3://{self.bucket.bucket_name}",
        )
        group = logs.LogGroup(
            self,
            id="aws_data_wrangler_log_group",
            retention=logs.RetentionDays.ONE_MONTH,
        )
        stream = logs.LogStream(
            self,
            id="aws_data_wrangler_log_stream",
            log_group=group,
        )

        # Region has no export; the rest follow one naming convention.
        CfnOutput(self, "Region", value=self.region)
        exported = (
            ("VPC", self.vpc.vpc_id),
            ("PublicSubnet1", self.vpc.public_subnets[0].subnet_id),
            ("PublicSubnet2", self.vpc.public_subnets[1].subnet_id),
            ("PrivateSubnet", self.vpc.private_subnets[0].subnet_id),
            ("KmsKeyArn", self.key.key_arn),
            ("BucketName", self.bucket.bucket_name),
        )
        for output_id, output_value in exported:
            CfnOutput(
                self,
                output_id,
                value=output_value,
                export_name=f"aws-data-wrangler-base-{output_id}",
            )
        CfnOutput(self, "GlueDatabaseName", value=database.database_name)
        CfnOutput(self, "LogGroupName", value=group.log_group_name)
        CfnOutput(self, "LogStream", value=stream.log_stream_name)
# ----- Example #28 -----
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Automatically triage IAM Access Analyzer findings.

        An EventBridge rule feeds ACTIVE findings into a Step Functions
        state machine that enriches each finding with resource tags,
        evaluates its risk level, and then either archives the finding or
        sends an email notification through a KMS-encrypted SNS topic.
        """
        super().__init__(scope, id, **kwargs)

        # Email address for the alert subscription, validated by pattern.
        email_subscription_parameter = core.CfnParameter(
            self,
            "EmailSubscriptionParameter",
            description="Email Address for Notification Subscription",
            allowed_pattern=
            '^(([^<>()\[\]\\.,;:\s@"]+(\.[^<>()\[\]\\.,;:\s@"]+)*)|(".+"))@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}])|(([a-zA-Z\-0-9]+\.)+[a-zA-Z]{2,}))$',
            min_length=1,
            constraint_description="Must be a valid email.")
        email_subscription = email_subscription_parameter.value_as_string

        # Layer providing an up-to-date boto3, built from requirements.txt
        # by a helper defined elsewhere on this stack class.
        boto3_lambda_layer = self.create_dependencies_layer(
            id="boto3layer",
            requirements_path="./layers/boto3/requirements.txt",
            output_dir="./layers/boto3")

        # All functions below are packaged from assets, not inlined.
        is_inline = False

        # Enriches the raw finding with tags from the affected resource.
        context_enrichment = self.create_lambda_function(
            boto3_lambda_layer, "./functions/context-enrichment",
            "context_enrichment", is_inline)
        # Tag-read APIs do not support resource-level scoping, hence "*".
        handler_statement = iam.PolicyStatement(
            actions=[
                "iam:ListRoleTags", "s3:GetBucketTagging", "lambda:ListTags",
                "sqs:ListQueueTags", "kms:ListAliases", "kms:ListResourceTags"
            ],
            effect=iam.Effect.ALLOW,
            resources=["*"])
        context_enrichment.add_to_role_policy(handler_statement)

        # Customer-managed key so the SNS topic is encrypted at rest.
        cmk_key = kms.Key(
            self,
            "SNSEncryptionAtRestKey",
            description="SNS Encryption at rest key",
            alias="sns-encryption-at-rest",
            enable_key_rotation=True,
        )

        email_topic = sns.Topic(
            self,
            "AccessAnalyzerNotificationTopic",
            display_name="Access Analyzer Finding Notification Topic",
            master_key=cmk_key)
        email_topic.add_subscription(
            subscriptions.EmailSubscription(email_subscription))

        # Publishes the notification message to the topic above.
        notification = self.create_lambda_function(
            boto3_lambda_layer, "./functions/notification", "notification",
            is_inline, {"SNS_TOPIC_ARN": email_topic.topic_arn})
        # Tightened from "*" to the single topic this function publishes to,
        # matching the sibling notification stack in this file.
        notification_statement = iam.PolicyStatement(
            actions=[
                "sns:Publish",
            ],
            effect=iam.Effect.ALLOW,
            resources=[email_topic.topic_arn])
        notification.add_to_role_policy(notification_statement)
        cmk_key.grant_encrypt_decrypt(notification)

        # Marks low-risk findings as archived in Access Analyzer.
        archive_access_analyzer_finding = self.create_lambda_function(
            boto3_lambda_layer, "./functions/archive-access-analyzer-finding",
            "archive-access-analyzer-finding", is_inline)
        archive_statement = iam.PolicyStatement(
            actions=[
                "access-analyzer:UpdateFindings",
            ],
            effect=iam.Effect.ALLOW,
            resources=["*"])
        archive_access_analyzer_finding.add_to_role_policy(archive_statement)

        # Decides whether the finding should be archived or notified on.
        evaluate_access_analyzer_finding = self.create_lambda_function(
            boto3_lambda_layer, "./functions/evaluate-access-analyzer-finding",
            "evaluate-access-analyzer-finding", is_inline)

        # https://docs.aws.amazon.com/cdk/api/latest/docs/aws-stepfunctions-readme.html
        access_analyzer_handler_task = sfn.Task(
            self,
            "Context Enrichment",
            task=sfn_tasks.InvokeFunction(context_enrichment),
            result_path="$.guid",
        )

        notification_task = sfn.Task(
            self,
            "Send Notification",
            task=sfn_tasks.InvokeFunction(notification),
            result_path="$.guid",
        )

        archive_task = sfn.Task(
            self,
            "Archive Finding",
            task=sfn_tasks.InvokeFunction(archive_access_analyzer_finding),
            result_path="$.guid",
        )

        evaluate_task = sfn.Task(
            self,
            "Evaluate Risk Level",
            task=sfn_tasks.InvokeFunction(evaluate_access_analyzer_finding),
            result_path="$.guid",
        )

        # NOTE(review): the Choice has no .otherwise() branch -- an
        # evaluation status other than ARCHIVE/NOTIFY fails the execution.
        # Confirm that is intended before adding a default.
        definition = access_analyzer_handler_task. \
            next(evaluate_task). \
            next(sfn.Choice(self, "Archive?"). \
                when(sfn.Condition.string_equals("$.guid.status", "ARCHIVE"), archive_task). \
                when(sfn.Condition.string_equals("$.guid.status", "NOTIFY"), notification_task) \
            )

        state_machine = sfn.StateMachine(
            self,
            "Access-Analyzer-Automatic-Finding-Archive-State-Machine",
            definition=definition,
            timeout=core.Duration.minutes(5),
        )

        # https://docs.aws.amazon.com/step-functions/latest/dg/tutorial-cloudwatch-events-s3.html
        access_analyzer_finding_rule = aws_events.Rule(
            self,
            "AccessAnalzyerFindingActiveEventRule",
            description="Access Analyzer Finding Event Active",
            enabled=True,
            event_pattern=aws_events.EventPattern(
                source=["aws.access-analyzer"],
                detail_type=["Access Analyzer Finding"],
                detail={"status": ["ACTIVE"]}),
            targets=[
                aws_events_targets.SfnStateMachine(state_machine),
                aws_events_targets.LambdaFunction(context_enrichment)
            ])
# ----- Example #29 -----
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        if self.node.try_get_context('vpc_type'):
            validate_cdk_json(self)

        ES_LOADER_TIMEOUT = 600
        ######################################################################
        # REGION mapping / ELB & Lambda Arch
        ######################################################################
        elb_id_temp = region_info.FactName.ELBV2_ACCOUNT
        elb_map_temp = region_info.RegionInfo.region_map(elb_id_temp)
        region_dict = {}
        for region in elb_map_temp:
            # ELB account ID
            region_dict[region] = {'ElbV2AccountId': elb_map_temp[region]}
            # Lambda Arch
            if region in ('us-east-1', 'us-east-2', 'us-west-2', 'ap-south-1',
                          'ap-southeast-1', 'ap-southeast-2', 'ap-northeast-1',
                          'eu-central-1', 'eu-west-1', 'eu-west-2'):
                region_dict[region]['LambdaArch'] = (
                    aws_lambda.Architecture.ARM_64.name)
            else:
                region_dict[region]['LambdaArch'] = (
                    aws_lambda.Architecture.X86_64.name)
        region_mapping = core.CfnMapping(
            scope=self, id='RegionMap', mapping=region_dict)

        ######################################################################
        # get params
        ######################################################################
        allow_source_address = core.CfnParameter(
            self, 'AllowedSourceIpAddresses', allowed_pattern=r'^[0-9./\s]*',
            description='Space-delimited list of CIDR blocks',
            default='10.0.0.0/8 172.16.0.0/12 192.168.0.0/16')
        sns_email = core.CfnParameter(
            self, 'SnsEmail', allowed_pattern=r'^[0-9a-zA-Z@_\-\+\.]*',
            description=('Input your email as SNS topic, where Amazon '
                         'OpenSearch Service will send alerts to'),
            default='*****@*****.**')
        geoip_license_key = core.CfnParameter(
            self, 'GeoLite2LicenseKey', allowed_pattern=r'^[0-9a-zA-Z]{16}$',
            default='xxxxxxxxxxxxxxxx',
            description=("If you wolud like to enrich geoip locaiton such as "
                         "IP address's country, get a license key form MaxMind"
                         " and input the key. If you not, keep "
                         "xxxxxxxxxxxxxxxx"))
        reserved_concurrency = core.CfnParameter(
            self, 'ReservedConcurrency', default=10, type='Number',
            description=('Input reserved concurrency. Increase this value if '
                         'there are steady logs delay despite no errors'))
        aes_domain_name = self.node.try_get_context('aes_domain_name')
        bucket = f'{aes_domain_name}-{core.Aws.ACCOUNT_ID}'
        s3bucket_name_geo = f'{bucket}-geo'
        s3bucket_name_log = f'{bucket}-log'
        s3bucket_name_snapshot = f'{bucket}-snapshot'

        # organizations / multiaccount
        org_id = self.node.try_get_context('organizations').get('org_id')
        org_mgmt_id = self.node.try_get_context(
            'organizations').get('management_id')
        org_member_ids = self.node.try_get_context(
            'organizations').get('member_ids')
        no_org_ids = self.node.try_get_context(
            'no_organizations').get('aws_accounts')

        # Overwrite default S3 bucket name as customer name
        temp_geo = self.node.try_get_context('s3_bucket_name').get('geo')
        if temp_geo:
            s3bucket_name_geo = temp_geo
        else:
            print('Using default bucket names')
        temp_log = self.node.try_get_context('s3_bucket_name').get('log')
        if temp_log:
            s3bucket_name_log = temp_log
        elif org_id or no_org_ids:
            s3bucket_name_log = f'{aes_domain_name}-{self.account}-log'
        else:
            print('Using default bucket names')
        temp_snap = self.node.try_get_context('s3_bucket_name').get('snapshot')
        if temp_snap:
            s3bucket_name_snapshot = temp_snap
        else:
            print('Using default bucket names')
        kms_cmk_alias = self.node.try_get_context('kms_cmk_alias')
        if not kms_cmk_alias:
            kms_cmk_alias = 'aes-siem-key'
            print('Using default key alais')

        ######################################################################
        # deploy VPC when context is defined as using VPC
        ######################################################################
        # vpc_type is 'new' or 'import' or None
        vpc_type = self.node.try_get_context('vpc_type')

        if vpc_type == 'new':
            is_vpc = True
            vpc_cidr = self.node.try_get_context('new_vpc_nw_cidr_block')
            subnet_cidr_mask = int(
                self.node.try_get_context('new_vpc_subnet_cidr_mask'))
            is_vpc = True
            # VPC
            vpc_aes_siem = aws_ec2.Vpc(
                self, 'VpcAesSiem', cidr=vpc_cidr,
                max_azs=3, nat_gateways=0,
                subnet_configuration=[
                    aws_ec2.SubnetConfiguration(
                        subnet_type=aws_ec2.SubnetType.ISOLATED,
                        name='aes-siem-subnet', cidr_mask=subnet_cidr_mask)])
            subnet1 = vpc_aes_siem.isolated_subnets[0]
            subnets = [{'subnet_type': aws_ec2.SubnetType.ISOLATED}]
            vpc_subnets = aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.ISOLATED)
            vpc_aes_siem_opt = vpc_aes_siem.node.default_child.cfn_options
            vpc_aes_siem_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN
            for subnet in vpc_aes_siem.isolated_subnets:
                subnet_opt = subnet.node.default_child.cfn_options
                subnet_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN
        elif vpc_type == 'import':
            vpc_id = self.node.try_get_context('imported_vpc_id')
            vpc_aes_siem = aws_ec2.Vpc.from_lookup(
                self, 'VpcAesSiem', vpc_id=vpc_id)

            subnet_ids = get_subnet_ids(self)
            subnets = []
            for number, subnet_id in enumerate(subnet_ids, 1):
                obj_id = 'Subenet' + str(number)
                subnet = aws_ec2.Subnet.from_subnet_id(self, obj_id, subnet_id)
                subnets.append(subnet)
            subnet1 = subnets[0]
            vpc_subnets = aws_ec2.SubnetSelection(subnets=subnets)

        if vpc_type:
            is_vpc = True
            # Security Group
            sg_vpc_noinbound_aes_siem = aws_ec2.SecurityGroup(
                self, 'AesSiemVpcNoinboundSecurityGroup',
                security_group_name='aes-siem-noinbound-vpc-sg',
                vpc=vpc_aes_siem)

            sg_vpc_aes_siem = aws_ec2.SecurityGroup(
                self, 'AesSiemVpcSecurityGroup',
                security_group_name='aes-siem-vpc-sg',
                vpc=vpc_aes_siem)
            sg_vpc_aes_siem.add_ingress_rule(
                peer=aws_ec2.Peer.ipv4(vpc_aes_siem.vpc_cidr_block),
                connection=aws_ec2.Port.tcp(443),)
            sg_vpc_opt = sg_vpc_aes_siem.node.default_child.cfn_options
            sg_vpc_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

            # VPC Endpoint
            vpc_aes_siem.add_gateway_endpoint(
                'S3Endpoint', service=aws_ec2.GatewayVpcEndpointAwsService.S3,
                subnets=subnets)
            vpc_aes_siem.add_interface_endpoint(
                'SQSEndpoint', security_groups=[sg_vpc_aes_siem],
                service=aws_ec2.InterfaceVpcEndpointAwsService.SQS,)
            vpc_aes_siem.add_interface_endpoint(
                'KMSEndpoint', security_groups=[sg_vpc_aes_siem],
                service=aws_ec2.InterfaceVpcEndpointAwsService.KMS,)
        else:
            is_vpc = False

        is_vpc = core.CfnCondition(
            self, 'IsVpc', expression=core.Fn.condition_equals(is_vpc, True))
        """
        CloudFormation実行時の条件式の書き方
        ClassのBasesが aws_cdk.core.Resource の時は、
        node.default_child.cfn_options.condition = is_vpc
        ClassのBasesが aws_cdk.core.CfnResource の時は、
        cfn_options.condition = is_vpc
        """

        ######################################################################
        # create cmk of KMS to encrypt S3 bucket
        ######################################################################
        kms_aes_siem = aws_kms.Key(
            self, 'KmsAesSiemLog', description='CMK for SIEM solution',
            removal_policy=core.RemovalPolicy.RETAIN)

        aws_kms.Alias(
            self, 'KmsAesSiemLogAlias', alias_name=kms_cmk_alias,
            target_key=kms_aes_siem,
            removal_policy=core.RemovalPolicy.RETAIN)

        kms_aes_siem.add_to_resource_policy(
            aws_iam.PolicyStatement(
                sid='Allow GuardDuty to use the key',
                actions=['kms:GenerateDataKey'],
                principals=[aws_iam.ServicePrincipal(
                    'guardduty.amazonaws.com')],
                resources=['*'],),)

        kms_aes_siem.add_to_resource_policy(
            aws_iam.PolicyStatement(
                sid='Allow VPC Flow Logs to use the key',
                actions=['kms:Encrypt', 'kms:Decrypt', 'kms:ReEncrypt*',
                         'kms:GenerateDataKey*', 'kms:DescribeKey'],
                principals=[aws_iam.ServicePrincipal(
                    'delivery.logs.amazonaws.com')],
                resources=['*'],),)
        # basic policy
        key_policy_basic1 = aws_iam.PolicyStatement(
            sid='Allow principals in the account to decrypt log files',
            actions=['kms:DescribeKey', 'kms:ReEncryptFrom'],
            principals=[aws_iam.AccountPrincipal(
                account_id=core.Aws.ACCOUNT_ID)],
            resources=['*'],)
        kms_aes_siem.add_to_resource_policy(key_policy_basic1)

        # for Athena
        key_policy_athena = aws_iam.PolicyStatement(
            sid='Allow Athena to query s3 objects with this key',
            actions=['kms:Decrypt', 'kms:DescribeKey', 'kms:Encrypt',
                     'kms:GenerateDataKey*', 'kms:ReEncrypt*'],
            principals=[aws_iam.AccountPrincipal(
                account_id=core.Aws.ACCOUNT_ID)],
            resources=['*'],
            conditions={'ForAnyValue:StringEquals': {
                'aws:CalledVia': 'athena.amazonaws.com'}})
        kms_aes_siem.add_to_resource_policy(key_policy_athena)

        # for CloudTrail
        key_policy_trail1 = aws_iam.PolicyStatement(
            sid='Allow CloudTrail to describe key',
            actions=['kms:DescribeKey'],
            principals=[aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')],
            resources=['*'],)
        kms_aes_siem.add_to_resource_policy(key_policy_trail1)

        key_policy_trail2 = aws_iam.PolicyStatement(
            sid=('Allow CloudTrail to encrypt logs'),
            actions=['kms:GenerateDataKey*'],
            principals=[aws_iam.ServicePrincipal(
                'cloudtrail.amazonaws.com')],
            resources=['*'],
            conditions={'StringLike': {
                'kms:EncryptionContext:aws:cloudtrail:arn': [
                    f'arn:aws:cloudtrail:*:{core.Aws.ACCOUNT_ID}:trail/*']}})
        kms_aes_siem.add_to_resource_policy(key_policy_trail2)

        ######################################################################
        # create s3 bucket
        ######################################################################
        block_pub = aws_s3.BlockPublicAccess(
            block_public_acls=True,
            ignore_public_acls=True,
            block_public_policy=True,
            restrict_public_buckets=True
        )
        s3_geo = aws_s3.Bucket(
            self, 'S3BucketForGeoip', block_public_access=block_pub,
            bucket_name=s3bucket_name_geo,
            # removal_policy=core.RemovalPolicy.DESTROY,
        )

        # create s3 bucket for log collector
        s3_log = aws_s3.Bucket(
            self, 'S3BucketForLog', block_public_access=block_pub,
            bucket_name=s3bucket_name_log, versioned=True,
            encryption=aws_s3.BucketEncryption.S3_MANAGED,
            # removal_policy=core.RemovalPolicy.DESTROY,
        )

        # create s3 bucket for aes snapshot
        s3_snapshot = aws_s3.Bucket(
            self, 'S3BucketForSnapshot', block_public_access=block_pub,
            bucket_name=s3bucket_name_snapshot,
            # removal_policy=core.RemovalPolicy.DESTROY,
        )

        ######################################################################
        # IAM Role
        ######################################################################
        # delopyment policy for lambda deploy-aes
        arn_prefix = f'arn:aws:logs:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}'
        loggroup_aes = f'log-group:/aws/aes/domains/{aes_domain_name}/*'
        loggroup_opensearch = (
            f'log-group:/aws/OpenSearchService/domains/{aes_domain_name}/*')
        loggroup_lambda = 'log-group:/aws/lambda/aes-siem-*'
        policydoc_create_loggroup = aws_iam.PolicyDocument(
            statements=[
                aws_iam.PolicyStatement(
                    actions=[
                        'logs:PutResourcePolicy',
                        'logs:DescribeLogGroups',
                        'logs:DescribeLogStreams'
                    ],
                    resources=[f'{arn_prefix}:*', ]
                ),
                aws_iam.PolicyStatement(
                    actions=[
                        'logs:CreateLogGroup', 'logs:CreateLogStream',
                        'logs:PutLogEvents', 'logs:PutRetentionPolicy'],
                    resources=[
                        f'{arn_prefix}:{loggroup_aes}',
                        f'{arn_prefix}:{loggroup_opensearch}',
                        f'{arn_prefix}:{loggroup_lambda}',
                    ],
                )
            ]
        )

        policydoc_crhelper = aws_iam.PolicyDocument(
            statements=[
                aws_iam.PolicyStatement(
                    actions=[
                        'lambda:AddPermission',
                        'lambda:RemovePermission',
                        'events:ListRules',
                        'events:PutRule',
                        'events:DeleteRule',
                        'events:PutTargets',
                        'events:RemoveTargets'],
                    resources=['*']
                )
            ]
        )

        # snaphot rule for AES
        policydoc_snapshot = aws_iam.PolicyDocument(
            statements=[
                aws_iam.PolicyStatement(
                    actions=['s3:ListBucket'],
                    resources=[s3_snapshot.bucket_arn]
                ),
                aws_iam.PolicyStatement(
                    actions=['s3:GetObject', 's3:PutObject',
                             's3:DeleteObject'],
                    resources=[s3_snapshot.bucket_arn + '/*']
                )
            ]
        )
        aes_siem_snapshot_role = aws_iam.Role(
            self, 'AesSiemSnapshotRole',
            role_name='aes-siem-snapshot-role',
            inline_policies=[policydoc_snapshot, ],
            assumed_by=aws_iam.ServicePrincipal('es.amazonaws.com')
        )

        policydoc_assume_snapshrole = aws_iam.PolicyDocument(
            statements=[
                aws_iam.PolicyStatement(
                    actions=['iam:PassRole'],
                    resources=[aes_siem_snapshot_role.role_arn]
                ),
            ]
        )

        aes_siem_deploy_role_for_lambda = aws_iam.Role(
            self, 'AesSiemDeployRoleForLambda',
            role_name='aes-siem-deploy-role-for-lambda',
            managed_policies=[
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    'AmazonOpenSearchServiceFullAccess'),
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    'service-role/AWSLambdaBasicExecutionRole'),
            ],
            inline_policies=[policydoc_assume_snapshrole, policydoc_snapshot,
                             policydoc_create_loggroup, policydoc_crhelper],
            assumed_by=aws_iam.ServicePrincipal('lambda.amazonaws.com')
        )

        if vpc_type:
            aes_siem_deploy_role_for_lambda.add_managed_policy(
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    'service-role/AWSLambdaVPCAccessExecutionRole')
            )

        # for alert from Amazon OpenSearch Service
        aes_siem_sns_role = aws_iam.Role(
            self, 'AesSiemSnsRole',
            role_name='aes-siem-sns-role',
            assumed_by=aws_iam.ServicePrincipal('es.amazonaws.com')
        )

        # EC2 role
        aes_siem_es_loader_ec2_role = aws_iam.Role(
            self, 'AesSiemEsLoaderEC2Role',
            role_name='aes-siem-es-loader-for-ec2',
            assumed_by=aws_iam.ServicePrincipal('ec2.amazonaws.com'),
        )

        aws_iam.CfnInstanceProfile(
            self, 'AesSiemEsLoaderEC2InstanceProfile',
            instance_profile_name=aes_siem_es_loader_ec2_role.role_name,
            roles=[aes_siem_es_loader_ec2_role.role_name]
        )

        ######################################################################
        # in VPC
        ######################################################################
        aes_role_exist = check_iam_role('/aws-service-role/es.amazonaws.com/')
        if vpc_type and not aes_role_exist:
            slr_aes = aws_iam.CfnServiceLinkedRole(
                self, 'AWSServiceRoleForAmazonOpenSearchService',
                aws_service_name='es.amazonaws.com',
                description='Created by cloudformation of siem stack'
            )
            slr_aes.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN

        ######################################################################
        # SQS for es-loader's DLQ
        ######################################################################
        sqs_aes_siem_dlq = aws_sqs.Queue(
            self, 'AesSiemDlq', queue_name='aes-siem-dlq',
            retention_period=core.Duration.days(14))

        sqs_aes_siem_splitted_logs = aws_sqs.Queue(
            self, 'AesSiemSqsSplitLogs',
            queue_name='aes-siem-sqs-splitted-logs',
            dead_letter_queue=aws_sqs.DeadLetterQueue(
                max_receive_count=2, queue=sqs_aes_siem_dlq),
            visibility_timeout=core.Duration.seconds(ES_LOADER_TIMEOUT),
            retention_period=core.Duration.days(14))

        ######################################################################
        # Setup Lambda
        ######################################################################
        # setup lambda of es_loader
        lambda_es_loader_vpc_kwargs = {}
        if vpc_type:
            lambda_es_loader_vpc_kwargs = {
                'security_group': sg_vpc_noinbound_aes_siem,
                'vpc': vpc_aes_siem,
                'vpc_subnets': vpc_subnets,
            }

        lambda_es_loader = aws_lambda.Function(
            self, 'LambdaEsLoader', **lambda_es_loader_vpc_kwargs,
            function_name='aes-siem-es-loader',
            description=f'{SOLUTION_NAME} / es-loader',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            architecture=aws_lambda.Architecture.X86_64,
            # architecture=region_mapping.find_in_map(
            #    core.Aws.REGION, 'LambdaArm'),
            # code=aws_lambda.Code.asset('../lambda/es_loader.zip'),
            code=aws_lambda.Code.asset('../lambda/es_loader'),
            handler='index.lambda_handler',
            memory_size=2048,
            timeout=core.Duration.seconds(ES_LOADER_TIMEOUT),
            reserved_concurrent_executions=(
                reserved_concurrency.value_as_number),
            dead_letter_queue_enabled=True,
            dead_letter_queue=sqs_aes_siem_dlq,
            environment={
                'GEOIP_BUCKET': s3bucket_name_geo, 'LOG_LEVEL': 'info',
                'POWERTOOLS_LOGGER_LOG_EVENT': 'false',
                'POWERTOOLS_SERVICE_NAME': 'es-loader',
                'POWERTOOLS_METRICS_NAMESPACE': 'SIEM'})
        es_loader_newver = lambda_es_loader.add_version(
            name=__version__, description=__version__)
        es_loader_opt = es_loader_newver.node.default_child.cfn_options
        es_loader_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

        # send only
        # sqs_aes_siem_dlq.grant(lambda_es_loader, 'sqs:SendMessage')
        # send and receive. note: re-processing the DLQ this way can loop
        sqs_aes_siem_dlq.grant(
            lambda_es_loader, 'sqs:SendMessage', 'sqs:ReceiveMessage',
            'sqs:DeleteMessage', 'sqs:GetQueueAttributes')

        sqs_aes_siem_splitted_logs.grant(
            lambda_es_loader, 'sqs:SendMessage', 'sqs:ReceiveMessage',
            'sqs:DeleteMessage', 'sqs:GetQueueAttributes')

        lambda_es_loader.add_event_source(
            aws_lambda_event_sources.SqsEventSource(
                sqs_aes_siem_splitted_logs, batch_size=1))

        # es-loader on EC2 role
        sqs_aes_siem_dlq.grant(
            aes_siem_es_loader_ec2_role, 'sqs:GetQueue*', 'sqs:ListQueues*',
            'sqs:ReceiveMessage*', 'sqs:DeleteMessage*')

        lambda_geo = aws_lambda.Function(
            self, 'LambdaGeoipDownloader',
            function_name='aes-siem-geoip-downloader',
            description=f'{SOLUTION_NAME} / geoip-downloader',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            architecture=aws_lambda.Architecture.X86_64,
            # architecture=region_mapping.find_in_map(
            #    core.Aws.REGION, 'LambdaArm'),
            code=aws_lambda.Code.asset('../lambda/geoip_downloader'),
            handler='index.lambda_handler',
            memory_size=320,
            timeout=core.Duration.seconds(300),
            environment={
                's3bucket_name': s3bucket_name_geo,
                'license_key': geoip_license_key.value_as_string,
            }
        )
        lambda_geo_newver = lambda_geo.add_version(
            name=__version__, description=__version__)
        lamba_geo_opt = lambda_geo_newver.node.default_child.cfn_options
        lamba_geo_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

        ######################################################################
        # setup OpenSearch Service
        ######################################################################
        lambda_deploy_es = aws_lambda.Function(
            self, 'LambdaDeployAES',
            function_name='aes-siem-deploy-aes',
            description=f'{SOLUTION_NAME} / opensearch domain deployment',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            architecture=aws_lambda.Architecture.X86_64,
            # architecture=region_mapping.find_in_map(
            #    core.Aws.REGION, 'LambdaArm'),
            # code=aws_lambda.Code.asset('../lambda/deploy_es.zip'),
            code=aws_lambda.Code.asset('../lambda/deploy_es'),
            handler='index.aes_domain_handler',
            memory_size=128,
            timeout=core.Duration.seconds(300),
            environment={
                'accountid': core.Aws.ACCOUNT_ID,
                'aes_domain_name': aes_domain_name,
                'aes_admin_role': aes_siem_deploy_role_for_lambda.role_arn,
                'es_loader_role': lambda_es_loader.role.role_arn,
                'allow_source_address': allow_source_address.value_as_string,
            },
            role=aes_siem_deploy_role_for_lambda,
        )
        lambda_deploy_es.add_environment(
            's3_snapshot', s3_snapshot.bucket_name)
        if vpc_type:
            lambda_deploy_es.add_environment(
                'vpc_subnet_id', subnet1.subnet_id)
            lambda_deploy_es.add_environment(
                'security_group_id', sg_vpc_aes_siem.security_group_id)
        else:
            lambda_deploy_es.add_environment('vpc_subnet_id', 'None')
            lambda_deploy_es.add_environment('security_group_id', 'None')
        deploy_es_newver = lambda_deploy_es.add_version(
            name=__version__, description=__version__)
        deploy_es_opt = deploy_es_newver.node.default_child.cfn_options
        deploy_es_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

        # execute lambda_deploy_es to deploy Amazon ES Domain
        aes_domain = aws_cloudformation.CfnCustomResource(
            self, 'AesSiemDomainDeployedR2',
            service_token=lambda_deploy_es.function_arn,)
        aes_domain.add_override('Properties.ConfigVersion', __version__)

        es_endpoint = aes_domain.get_att('es_endpoint').to_string()
        lambda_es_loader.add_environment('ES_ENDPOINT', es_endpoint)
        lambda_es_loader.add_environment(
            'SQS_SPLITTED_LOGS_URL', sqs_aes_siem_splitted_logs.queue_url)

        lambda_configure_es_vpc_kwargs = {}
        if vpc_type:
            lambda_configure_es_vpc_kwargs = {
                'security_group': sg_vpc_noinbound_aes_siem,
                'vpc': vpc_aes_siem,
                'vpc_subnets': aws_ec2.SubnetSelection(subnets=[subnet1, ]), }
        lambda_configure_es = aws_lambda.Function(
            self, 'LambdaConfigureAES', **lambda_configure_es_vpc_kwargs,
            function_name='aes-siem-configure-aes',
            description=f'{SOLUTION_NAME} / opensearch configuration',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            architecture=aws_lambda.Architecture.X86_64,
            # architecture=region_mapping.find_in_map(
            #    core.Aws.REGION, 'LambdaArm'),
            code=aws_lambda.Code.asset('../lambda/deploy_es'),
            handler='index.aes_config_handler',
            memory_size=128,
            timeout=core.Duration.seconds(300),
            environment={
                'accountid': core.Aws.ACCOUNT_ID,
                'aes_domain_name': aes_domain_name,
                'aes_admin_role': aes_siem_deploy_role_for_lambda.role_arn,
                'es_loader_role': lambda_es_loader.role.role_arn,
                'allow_source_address': allow_source_address.value_as_string,
                'es_endpoint': es_endpoint,
            },
            role=aes_siem_deploy_role_for_lambda,
        )
        lambda_configure_es.add_environment(
            's3_snapshot', s3_snapshot.bucket_name)
        if vpc_type:
            lambda_configure_es.add_environment(
                'vpc_subnet_id', subnet1.subnet_id)
            lambda_configure_es.add_environment(
                'security_group_id', sg_vpc_aes_siem.security_group_id)
        else:
            lambda_configure_es.add_environment('vpc_subnet_id', 'None')
            lambda_configure_es.add_environment('security_group_id', 'None')
        configure_es_newver = lambda_configure_es.add_version(
            name=__version__, description=__version__)
        configure_es_opt = configure_es_newver.node.default_child.cfn_options
        configure_es_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

        aes_config = aws_cloudformation.CfnCustomResource(
            self, 'AesSiemDomainConfiguredR2',
            service_token=lambda_configure_es.function_arn,)
        aes_config.add_override('Properties.ConfigVersion', __version__)
        aes_config.add_depends_on(aes_domain)
        aes_config.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN

        es_arn = (f'arn:aws:es:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}'
                  f':domain/{aes_domain_name}')
        # grant permission to es_loader role
        inline_policy_to_load_entries_into_es = aws_iam.Policy(
            self, 'aes-siem-policy-to-load-entries-to-es',
            policy_name='aes-siem-policy-to-load-entries-to-es',
            statements=[
                aws_iam.PolicyStatement(
                    actions=['es:*'],
                    resources=[es_arn + '/*', ]),
            ]
        )
        lambda_es_loader.role.attach_inline_policy(
            inline_policy_to_load_entries_into_es)
        aes_siem_es_loader_ec2_role.attach_inline_policy(
            inline_policy_to_load_entries_into_es)

        # grant additional permission to es_loader role
        additional_kms_cmks = self.node.try_get_context('additional_kms_cmks')
        if additional_kms_cmks:
            inline_policy_access_to_additional_cmks = aws_iam.Policy(
                self, 'access_to_additional_cmks',
                policy_name='access_to_additional_cmks',
                statements=[
                    aws_iam.PolicyStatement(
                        actions=['kms:Decrypt'],
                        resources=sorted(set(additional_kms_cmks))
                    )
                ]
            )
            lambda_es_loader.role.attach_inline_policy(
                inline_policy_access_to_additional_cmks)
            aes_siem_es_loader_ec2_role.attach_inline_policy(
                inline_policy_access_to_additional_cmks)
        additional_buckets = self.node.try_get_context('additional_s3_buckets')

        if additional_buckets:
            buckets_list = []
            for bucket in additional_buckets:
                buckets_list.append(f'arn:aws:s3:::{bucket}')
                buckets_list.append(f'arn:aws:s3:::{bucket}/*')
            inline_policy_access_to_additional_buckets = aws_iam.Policy(
                self, 'access_to_additional_buckets',
                policy_name='access_to_additional_buckets',
                statements=[
                    aws_iam.PolicyStatement(
                        actions=['s3:GetObject*', 's3:GetBucket*', 's3:List*'],
                        resources=sorted(set(buckets_list))
                    )
                ]
            )
            lambda_es_loader.role.attach_inline_policy(
                inline_policy_access_to_additional_buckets)
            aes_siem_es_loader_ec2_role.attach_inline_policy(
                inline_policy_access_to_additional_buckets)

        kms_aes_siem.grant_decrypt(lambda_es_loader)
        kms_aes_siem.grant_decrypt(aes_siem_es_loader_ec2_role)

        ######################################################################
        # s3 notification and grant permission
        ######################################################################
        s3_geo.grant_read_write(lambda_geo)
        s3_geo.grant_read(lambda_es_loader)
        s3_geo.grant_read(aes_siem_es_loader_ec2_role)
        s3_log.grant_read(lambda_es_loader)
        s3_log.grant_read(aes_siem_es_loader_ec2_role)

        # create s3 notification for es_loader
        notification = aws_s3_notifications.LambdaDestination(lambda_es_loader)

        # assign notification for the s3 PUT event type
        # most log system use PUT, but also CLB use POST & Multipart Upload
        s3_log.add_event_notification(
            aws_s3.EventType.OBJECT_CREATED, notification,
            aws_s3.NotificationKeyFilter(prefix='AWSLogs/'))

        # For user logs, not AWS logs
        s3_log.add_event_notification(
            aws_s3.EventType.OBJECT_CREATED, notification,
            aws_s3.NotificationKeyFilter(prefix='UserLogs/'))

        # Download geoip to S3 once by executing lambda_geo
        get_geodb = aws_cloudformation.CfnCustomResource(
            self, 'ExecLambdaGeoipDownloader',
            service_token=lambda_geo.function_arn,)
        get_geodb.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN

        # Download geoip every 12 hours (see the rate schedule below)
        rule = aws_events.Rule(
            self, 'CwlRuleLambdaGeoipDownloaderDilly',
            schedule=aws_events.Schedule.rate(core.Duration.hours(12)))
        rule.add_target(aws_events_targets.LambdaFunction(lambda_geo))

        ######################################################################
        # bucket policy
        ######################################################################
        s3_awspath = s3_log.bucket_arn + '/AWSLogs/' + core.Aws.ACCOUNT_ID
        bucket_policy_common1 = aws_iam.PolicyStatement(
            sid='ELB Policy',
            principals=[aws_iam.AccountPrincipal(
                account_id=region_mapping.find_in_map(
                    core.Aws.REGION, 'ElbV2AccountId'))],
            actions=['s3:PutObject'], resources=[s3_awspath + '/*'],)
        # NLB / ALB / R53resolver / VPC Flow Logs
        bucket_policy_elb1 = aws_iam.PolicyStatement(
            sid='AWSLogDeliveryAclCheck For ALB NLB R53Resolver Flowlogs',
            principals=[aws_iam.ServicePrincipal(
                'delivery.logs.amazonaws.com')],
            actions=['s3:GetBucketAcl', 's3:ListBucket'],
            resources=[s3_log.bucket_arn],)
        bucket_policy_elb2 = aws_iam.PolicyStatement(
            sid='AWSLogDeliveryWrite For ALB NLB R53Resolver Flowlogs',
            principals=[aws_iam.ServicePrincipal(
                'delivery.logs.amazonaws.com')],
            actions=['s3:PutObject'], resources=[s3_awspath + '/*'],
            conditions={
                'StringEquals': {'s3:x-amz-acl': 'bucket-owner-full-control'}})
        s3_log.add_to_resource_policy(bucket_policy_common1)
        s3_log.add_to_resource_policy(bucket_policy_elb1)
        s3_log.add_to_resource_policy(bucket_policy_elb2)

        # CloudTrail
        bucket_policy_trail1 = aws_iam.PolicyStatement(
            sid='AWSLogDeliveryAclCheck For Cloudtrail',
            principals=[aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')],
            actions=['s3:GetBucketAcl'], resources=[s3_log.bucket_arn],)
        bucket_policy_trail2 = aws_iam.PolicyStatement(
            sid='AWSLogDeliveryWrite For CloudTrail',
            principals=[aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')],
            actions=['s3:PutObject'], resources=[s3_awspath + '/*'],
            conditions={
                'StringEquals': {'s3:x-amz-acl': 'bucket-owner-full-control'}})
        s3_log.add_to_resource_policy(bucket_policy_trail1)
        s3_log.add_to_resource_policy(bucket_policy_trail2)

        # GuardDuty
        bucket_policy_gd1 = aws_iam.PolicyStatement(
            sid='Allow GuardDuty to use the getBucketLocation operation',
            principals=[aws_iam.ServicePrincipal('guardduty.amazonaws.com')],
            actions=['s3:GetBucketLocation'], resources=[s3_log.bucket_arn],)
        bucket_policy_gd2 = aws_iam.PolicyStatement(
            sid='Allow GuardDuty to upload objects to the bucket',
            principals=[aws_iam.ServicePrincipal('guardduty.amazonaws.com')],
            actions=['s3:PutObject'], resources=[s3_log.bucket_arn + '/*'],)
        bucket_policy_gd5 = aws_iam.PolicyStatement(
            sid='Deny non-HTTPS access', effect=aws_iam.Effect.DENY,
            actions=['s3:*'], resources=[s3_log.bucket_arn + '/*'],
            conditions={'Bool': {'aws:SecureTransport': 'false'}})
        bucket_policy_gd5.add_any_principal()
        s3_log.add_to_resource_policy(bucket_policy_gd1)
        s3_log.add_to_resource_policy(bucket_policy_gd2)
        s3_log.add_to_resource_policy(bucket_policy_gd5)

        # Config
        bucket_policy_config1 = aws_iam.PolicyStatement(
            sid='AWSConfig BucketPermissionsCheck and BucketExistenceCheck',
            principals=[aws_iam.ServicePrincipal('config.amazonaws.com')],
            actions=['s3:GetBucketAcl', 's3:ListBucket'],
            resources=[s3_log.bucket_arn],)
        bucket_policy_config2 = aws_iam.PolicyStatement(
            sid='AWSConfigBucketDelivery',
            principals=[aws_iam.ServicePrincipal('config.amazonaws.com')],
            actions=['s3:PutObject'], resources=[s3_awspath + '/Config/*'],
            conditions={
                'StringEquals': {'s3:x-amz-acl': 'bucket-owner-full-control'}})
        s3_log.add_to_resource_policy(bucket_policy_config1)
        s3_log.add_to_resource_policy(bucket_policy_config2)

        # geoip
        bucket_policy_geo1 = aws_iam.PolicyStatement(
            sid='Allow geoip downloader and es-loader to read/write',
            principals=[lambda_es_loader.role, lambda_geo.role],
            actions=['s3:PutObject', 's3:GetObject', 's3:DeleteObject'],
            resources=[s3_geo.bucket_arn + '/*'],)
        s3_geo.add_to_resource_policy(bucket_policy_geo1)

        # ES Snapshot
        bucket_policy_snapshot = aws_iam.PolicyStatement(
            sid='Allow ES to store snapshot',
            principals=[aes_siem_snapshot_role],
            actions=['s3:PutObject', 's3:GetObject', 's3:DeleteObject'],
            resources=[s3_snapshot.bucket_arn + '/*'],)
        s3_snapshot.add_to_resource_policy(bucket_policy_snapshot)

        ######################################################################
        # for multiaccount / organizations
        ######################################################################
        if org_id or no_org_ids:
            ##################################################################
            # KMS key policy for multiaccount / organizations
            ##################################################################
            # for CloudTrail
            cond_tail2 = self.make_resource_list(
                path='arn:aws:cloudtrail:*:', tail=':trail/*',
                keys=self.list_without_none(org_mgmt_id, no_org_ids))
            key_policy_mul_trail2 = aws_iam.PolicyStatement(
                sid=('Allow CloudTrail to encrypt logs for multiaccounts'),
                actions=['kms:GenerateDataKey*'],
                principals=[aws_iam.ServicePrincipal(
                    'cloudtrail.amazonaws.com')],
                resources=['*'],
                conditions={'StringLike': {
                    'kms:EncryptionContext:aws:cloudtrail:arn': cond_tail2}})
            kms_aes_siem.add_to_resource_policy(key_policy_mul_trail2)

            # for replication
            key_policy_rep1 = aws_iam.PolicyStatement(
                sid=('Enable cross account encrypt access for S3 Cross Region '
                     'Replication'),
                actions=['kms:Encrypt'],
                principals=self.make_account_principals(
                    org_mgmt_id, org_member_ids, no_org_ids),
                resources=['*'],)
            kms_aes_siem.add_to_resource_policy(key_policy_rep1)

            ##################################################################
            # Bucket Policy for multiaccount / organizations
            ##################################################################
            s3_log_bucket_arn = 'arn:aws:s3:::' + s3bucket_name_log

            # for CloudTrail
            s3_mulpaths = self.make_resource_list(
                path=f'{s3_log_bucket_arn}/AWSLogs/', tail='/*',
                keys=self.list_without_none(org_id, org_mgmt_id, no_org_ids))
            bucket_policy_org_trail = aws_iam.PolicyStatement(
                sid='AWSCloudTrailWrite for Multiaccounts / Organizations',
                principals=[
                    aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')],
                actions=['s3:PutObject'], resources=s3_mulpaths,
                conditions={'StringEquals': {
                    's3:x-amz-acl': 'bucket-owner-full-control'}})
            s3_log.add_to_resource_policy(bucket_policy_org_trail)

            # config
            s3_conf_multpaths = self.make_resource_list(
                path=f'{s3_log_bucket_arn}/AWSLogs/', tail='/Config/*',
                keys=self.list_without_none(org_id, org_mgmt_id, no_org_ids))
            bucket_policy_mul_config2 = aws_iam.PolicyStatement(
                sid='AWSConfigBucketDelivery',
                principals=[aws_iam.ServicePrincipal('config.amazonaws.com')],
                actions=['s3:PutObject'], resources=s3_conf_multpaths,
                conditions={'StringEquals': {
                    's3:x-amz-acl': 'bucket-owner-full-control'}})
            s3_log.add_to_resource_policy(bucket_policy_mul_config2)

            # for replication
            bucket_policy_rep1 = aws_iam.PolicyStatement(
                sid='PolicyForDestinationBucket / Permissions on objects',
                principals=self.make_account_principals(
                    org_mgmt_id, org_member_ids, no_org_ids),
                actions=['s3:ReplicateDelete', 's3:ReplicateObject',
                         's3:ReplicateTags', 's3:GetObjectVersionTagging',
                         's3:ObjectOwnerOverrideToBucketOwner'],
                resources=[f'{s3_log_bucket_arn}/*'])
            bucket_policy_rep2 = aws_iam.PolicyStatement(
                sid='PolicyForDestinationBucket / Permissions on bucket',
                principals=self.make_account_principals(
                    org_mgmt_id, org_member_ids, no_org_ids),
                actions=['s3:List*', 's3:GetBucketVersioning',
                         's3:PutBucketVersioning'],
                resources=[f'{s3_log_bucket_arn}'])
            s3_log.add_to_resource_policy(bucket_policy_rep1)
            s3_log.add_to_resource_policy(bucket_policy_rep2)

        ######################################################################
        # SNS topic for Amazon OpenSearch Service Alert
        ######################################################################
        sns_topic = aws_sns.Topic(
            self, 'SnsTopic', topic_name='aes-siem-alert',
            display_name='AES SIEM')

        sns_topic.add_subscription(aws_sns_subscriptions.EmailSubscription(
            email_address=sns_email.value_as_string))
        sns_topic.grant_publish(aes_siem_sns_role)

        ######################################################################
        # output of CFn
        ######################################################################
        kibanaurl = f'https://{es_endpoint}/_dashboards/'
        kibanaadmin = aes_domain.get_att('kibanaadmin').to_string()
        kibanapass = aes_domain.get_att('kibanapass').to_string()

        core.CfnOutput(self, 'RoleDeploy', export_name='role-deploy',
                       value=aes_siem_deploy_role_for_lambda.role_arn)
        core.CfnOutput(self, 'DashboardsUrl', export_name='dashboards-url',
                       value=kibanaurl)
        core.CfnOutput(self, 'DashboardsPassword',
                       export_name='dashboards-pass', value=kibanapass,
                       description=('Please change the password in OpenSearch '
                                    'Dashboards ASAP'))
        core.CfnOutput(self, 'DashboardsAdminID',
                       export_name='dashboards-admin', value=kibanaadmin)
示例#30
0
    def __init__(
        self,
        scope: Construct,
        id: str,
        context: "Context",
        team_name: str,
        team_policies: List[str],
        image: Optional[str],
    ) -> None:
        """Provision the per-team stack resources.

        Creates/imports, in order: VPC + subnet lookups, a team-scoped KMS
        CMK, a team security group, a reference to the shared scratch S3
        bucket, the EKS pod IAM role, an EFS access point on the shared
        file system, and two SSM parameters describing the team context.

        Args:
            scope: Parent CDK construct (typically the app).
            id: Construct id; also used as the CloudFormation stack name.
            context: Project-wide deployment context (account/region,
                networking, scratch bucket ARN, shared EFS ids).
            team_name: Team identifier used in resource names, tags, and
                SSM parameter paths.
            team_policies: Policy names attached to the team role via
                IamBuilder.
            image: Optional container image reference; stored on the
                instance but not otherwise used in this constructor.

        Raises:
            ValueError: If the context carries no VPC id.
            Exception: If the scratch bucket ARN or the shared EFS file
                system / security group ids are missing from the manifest.
        """
        # Stash constructor arguments; the builders below read them.
        self.scope = scope
        self.id = id
        self.context: "Context" = context
        self.team_name: str = team_name
        self.team_policies: List[str] = team_policies
        self.image: Optional[str] = image
        super().__init__(
            scope=scope,
            id=id,
            stack_name=id,
            env=Environment(account=self.context.account_id,
                            region=self.context.region),
        )
        # Tag every resource in the stack with the environment and team.
        Tags.of(scope=cast(IConstruct, self)).add(
            key="Env", value=f"orbit-{self.context.name}")
        Tags.of(scope=cast(IConstruct, self)).add(key="TeamSpace",
                                                  value=self.team_name)

        # Import the pre-existing VPC and its subnets from the manifest.
        if self.context.networking.vpc_id is None:
            raise ValueError("self.context.networking.vpc_id is None!")
        self.i_vpc = ec2.Vpc.from_vpc_attributes(
            scope=self,
            id="vpc",
            vpc_id=self.context.networking.vpc_id,
            availability_zones=cast(
                List[str], self.context.networking.availability_zones),
        )
        self.i_isolated_subnets = Ec2Builder.build_subnets(
            scope=self, subnet_manifests=context.networking.isolated_subnets)
        self.i_private_subnets = Ec2Builder.build_subnets(
            scope=self, subnet_manifests=context.networking.private_subnets)
        administrator_arns: List[str] = [
        ]  # A place to add other admins if needed for KMS
        # The account root (plus any extra admin ARNs above) gets full
        # control of the team CMK via the key policy below.
        admin_principals = iam.CompositePrincipal(
            *[iam.ArnPrincipal(arn) for arn in administrator_arns],
            iam.ArnPrincipal(f"arn:aws:iam::{self.context.account_id}:root"),
        )
        # Team-scoped CMK; RETAIN so encrypted data survives stack deletion.
        self.team_kms_key: kms.Key = kms.Key(
            self,
            id="kms-key",
            removal_policy=core.RemovalPolicy.RETAIN,
            enabled=True,
            enable_key_rotation=True,
            policy=iam.PolicyDocument(statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=["kms:*"],
                    resources=["*"],
                    principals=[cast(iam.IPrincipal, admin_principals)],
                )
            ]),
        )
        self.team_security_group: ec2.SecurityGroup = Ec2Builder.build_team_security_group(
            scope=self,
            context=context,
            team_name=self.team_name,
            vpc=self.i_vpc)
        self.policies: List[str] = self.team_policies
        # Scratch bucket must already exist; we only import it by ARN here.
        if self.context.scratch_bucket_arn:
            self.scratch_bucket: s3.Bucket = cast(
                s3.Bucket,
                s3.Bucket.from_bucket_attributes(
                    scope=self,
                    id="scratch_bucket",
                    bucket_arn=self.context.scratch_bucket_arn,
                    # Derive the bucket name from the ARN
                    # ("arn:aws:s3:::<name>").
                    bucket_name=self.context.scratch_bucket_arn.split(":::")
                    [1],
                ),
            )
        else:
            raise Exception(
                "Scratch bucket was not provided in Manifest ('ScratchBucketArn')"
            )

        # IAM role assumed by the team's EKS pods.
        self.role_eks_pod = IamBuilder.build_team_role(
            scope=self,
            context=self.context,
            team_name=self.team_name,
            policy_names=self.policies,
            scratch_bucket=cast(s3.IBucket, self.scratch_bucket),
            team_kms_key=self.team_kms_key,
        )
        shared_fs_name: str = f"orbit-{context.name}-{self.team_name}-shared-fs"
        if context.shared_efs_fs_id is None:
            raise Exception(
                "Shared EFS File system ID was not provided in Manifest ('SharedEfsFsId')"
            )

        if context.shared_efs_sg_id is None:
            raise Exception(
                "Shared EFS File system security group ID was not provided in Manifest ('SharedEfsSgId')"
            )

        # Import the shared EFS file system (created elsewhere) together
        # with its security group.
        self.shared_fs: efs.FileSystem = cast(
            efs.FileSystem,
            efs.FileSystem.from_file_system_attributes(
                scope=self,
                id=shared_fs_name,
                file_system_id=context.shared_efs_fs_id,
                security_group=ec2.SecurityGroup.from_security_group_id(
                    scope=self,
                    id="team_sec_group",
                    security_group_id=context.shared_efs_sg_id),
            ),
        )

        # Team-specific access point on the shared file system.
        self.efs_ap: efs.AccessPoint = EfsBuilder.build_file_system_access_point(
            scope=self, team_name=team_name, shared_fs=self.shared_fs)

        # Publish the team context as a JSON blob in SSM so other
        # components (e.g. the orchestrator) can discover these resources.
        team_ssm_parameter_name: str = f"/orbit/{context.name}/teams/{self.team_name}/team"
        self.context_parameter: ssm.StringParameter = ssm.StringParameter(
            scope=self,
            id=team_ssm_parameter_name,
            string_value=json.dumps({
                "EfsId":
                self.shared_fs.file_system_id,
                "EfsApId":
                self.efs_ap.access_point_id,
                "EksPodRoleArn":
                self.role_eks_pod.role_arn,
                "ScratchBucket":
                self.scratch_bucket.bucket_name,
                "TeamKmsKeyArn":
                self.team_kms_key.key_arn,
                "TeamSecurityGroupId":
                self.team_security_group.security_group_id,
            }),
            type=ssm.ParameterType.STRING,
            description="Orbit Workbench Team Context.",
            parameter_name=team_ssm_parameter_name,
            simple_name=False,
            tier=ssm.ParameterTier.INTELLIGENT_TIERING,
        )
        # Placeholder for user-created profiles; starts as an empty JSON
        # list and is presumably appended to at runtime — TODO confirm.
        ssm_profile_name = f"/orbit/{self.context.name}/teams/{self.team_name}/user/profiles"
        self.user_profiles: ssm.StringParameter = ssm.StringParameter(
            scope=self,
            id=ssm_profile_name,
            string_value="[]",
            type=ssm.ParameterType.STRING,
            description="Team additional profiles created by the team users",
            parameter_name=ssm_profile_name,
            simple_name=False,
            tier=ssm.ParameterTier.INTELLIGENT_TIERING,
        )