def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    """Provision a single-node Elasticsearch domain with an open access policy.

    The domain is kept on ``self.elastic_domain`` and its endpoint is exported
    via the ``ESDomainEndpoint`` CloudFormation output.
    """
    super().__init__(scope, construct_id, **kwargs)

    # Wide-open "es:*" statement for any principal; fine-grained access
    # control (master user/password below) is the effective gate.
    open_es_statement = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        actions=["es:*"],
        resources=["*"],
    )
    open_es_statement.add_any_principal()

    self.elastic_domain = aes.Domain(
        self,
        "elastic_domain",
        version=aes.ElasticsearchVersion.V7_9,
        # Smallest practical cluster: one t3.small data node, no dedicated masters.
        capacity=aes.CapacityConfig(
            data_node_instance_type="t3.small.elasticsearch",
            data_nodes=1,
        ),
        ebs=aes.EbsOptions(enabled=True, volume_size=10),
        access_policies=[open_es_statement],
        # Master credentials come from project config; the password is wrapped
        # as a SecretValue so CDK does not log it in plain text.
        fine_grained_access_control=aes.AdvancedSecurityOptions(
            master_user_name=config.get_es_credentials()[0],
            master_user_password=core.SecretValue(config.get_es_credentials()[1]),
        ),
        zone_awareness=aes.ZoneAwarenessConfig(enabled=False),
        node_to_node_encryption=True,
        encryption_at_rest=aes.EncryptionAtRestOptions(enabled=True),
        enforce_https=True,
        # Dev-style lifecycle: tear the domain down with the stack.
        removal_policy=core.RemovalPolicy.DESTROY,
    )

    core.Tags.of(self.elastic_domain).add("system-id", config.get_system_id())
    core.CfnOutput(self, "ESDomainEndpoint",
                   value=self.elastic_domain.domain_endpoint)
def __init__(self, scope: core.Construct, id: str, config: Dict, vpc: ec2.Vpc, es_sg: ec2.SecurityGroup) -> None:
    """Create a VPC-hosted Elasticsearch domain driven entirely by `config`.

    Args:
        scope: Parent construct.
        id: Construct id.
        config: Deployment configuration; this construct reads
            ``config['data']['elasticsearch']`` plus ``awsRegion`` / ``awsAccount``.
        vpc: VPC whose subnets receive the domain's ENIs.
        es_sg: Security group attached to the domain.
    """
    super().__init__(scope, id)
    es_config = config['data']['elasticsearch']

    # Build ES domain construct parameters from config.
    capacity_config = es.CapacityConfig(
        master_node_instance_type=es_config['capacity']['masterNodes']['instanceType'],
        master_nodes=es_config['capacity']['masterNodes']['count'],
        data_node_instance_type=es_config['capacity']['dataNodes']['instanceType'],
        data_nodes=es_config['capacity']['dataNodes']['count'],
    )
    vpc_options = es.VpcOptions(
        security_groups=[es_sg],
        subnets=vpc.select_subnets(
            subnet_group_name=es_config['subnetGroupName']).subnets,
    )
    ebs_options = es.EbsOptions(volume_size=es_config['ebs']['volumeSize'])
    zone_awareness = es.ZoneAwarenessConfig(
        availability_zone_count=es_config['zoneAwareness']['count'],
        enabled=es_config['zoneAwareness']['enabled'],
    )

    logging_cfg = es_config['logging']
    # Prefer the correctly spelled key, but fall back to the historical typo
    # 'slowIearchLogEnabled' so existing config files keep working.
    slow_search_enabled = logging_cfg.get(
        'slowSearchLogEnabled', logging_cfg.get('slowIearchLogEnabled'))
    logging_options = es.LoggingOptions(
        app_log_enabled=logging_cfg['appLogEnabled'],
        audit_log_enabled=logging_cfg['auditLogEnabled'],
        slow_index_log_enabled=logging_cfg['slowIndexLogEnabled'],
        slow_search_log_enabled=slow_search_enabled,
    )

    # Any principal may use the ES APIs, but only on this specific domain.
    access_policy = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        principals=[iam.AnyPrincipal()],
        actions=['es:*'],
        resources=[
            "arn:aws:es:" + config['awsRegion'] + ":" +
            config['awsAccount'] + ":domain/" + es_config['domainName'] + "/*"
        ])

    # Create the ES domain.
    es.Domain(
        self,
        'Domain',
        domain_name=es_config['domainName'],
        version=es.ElasticsearchVersion.of(es_config['version']),
        capacity=capacity_config,
        ebs=ebs_options,
        zone_awareness=zone_awareness,
        vpc_options=vpc_options,
        logging=logging_options,
        access_policies=[access_policy],
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Create the Elasticsearch 6.8 domain for this component.

    NOTE(review): ``super().__init__`` is never called here, and the domain is
    parented to ``scope`` rather than ``self`` — confirm both are intentional.
    ``kwargs`` is accepted but unused.
    """
    # Elasticsearch
    # Access policy statement is built by a sibling method defined elsewhere.
    iam_es_statement = self.create_iam_statement_for_elasticsearch()
    self.es_domain = elasticsearch.Domain(
        scope,
        'ES_Domain',
        version=elasticsearch.ElasticsearchVersion.V6_8,
        access_policies=[iam_es_statement],
        # Two data nodes plus two dedicated masters; instance types hard-coded.
        capacity=elasticsearch.CapacityConfig(
            data_node_instance_type='m3.medium.elasticsearch',
            data_nodes=2,
            master_node_instance_type='m3.large.elasticsearch',
            master_nodes=2))
def create_es_domain(self) -> None:
    """Create the Elasticsearch domain and wire it into the lambdas that use it."""
    consumers: List[aws_lambda.Function] = [
        self.lambdas_["create_elastic_index_lambda"],
        self.lambdas_["insert_into_elastic_lambda"],
        self.api_lambdas_["SearchEndpointLambda"],
    ]

    # Each consumer lambda's role may perform any ES action.  No resource is
    # specified: the domain the policy is attached to is implicit.
    domain_access = iam.PolicyStatement(
        actions=["es:*"],
        principals=[fn.grant_principal for fn in consumers],
    )

    esd = elasticsearch.Domain(
        self,
        id="cbers2stac",
        # This is the version currently used by localstack
        version=elasticsearch.ElasticsearchVersion.V7_7,
        ebs=elasticsearch.EbsOptions(enabled=True,
                                     volume_size=settings.es_volume_size),
        capacity=elasticsearch.CapacityConfig(
            data_node_instance_type=settings.es_instance_type,
            data_nodes=settings.es_data_nodes,
        ),
        access_policies=[domain_access],
    )

    # Tell every consumer lambda where (and how) to reach the domain.
    es_env = {
        "ES_ENDPOINT": esd.domain_endpoint,
        "ES_PORT": "443",
        "ES_SSL": "YES",
    }
    for fn in consumers:
        for key, value in es_env.items():
            fn.add_environment(key, value)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Provision two Elasticsearch resources: an L2 demo domain and an L1 CfnDomain.

    ``QueueingDomain`` exercises the high-level construct; ``CDKElasticSearch``
    uses the raw CloudFormation surface and is kept on ``self.elasticSearch``.
    """
    super().__init__(scope, id, **kwargs)

    # High-level construct; kept as a local on purpose — instantiation alone
    # adds the resource to the stack.
    dev_domain = es.Domain(
        self,
        "QueueingDomain",
        version=es.ElasticsearchVersion.V7_9,
        enable_version_upgrade=True)

    self.elasticSearch = es.CfnDomain(
        self,
        "CDKElasticSearch",
        domain_name="cdk-elasticsearch",
        elasticsearch_version="7.4",
        # Open "es:*" access scoped to this domain.  The resource ARN used to
        # point at "domain/cdk-test/*", which does not match the domain this
        # policy is attached to; it now names "cdk-elasticsearch".
        access_policies={
            "Version": "2012-10-17",
            "Statement": [{
                "Effect": "Allow",
                "Principal": {"AWS": ["*"]},
                "Action": ["es:*"],
                "Resource": "arn:aws:es:us-east-1:755723993001:domain/cdk-elasticsearch/*"
            }]
        },
        elasticsearch_cluster_config=es.CfnDomain.ElasticsearchClusterConfigProperty(
            instance_count=1,
            instance_type="t3.small.elasticsearch",
            zone_awareness_enabled=False),
        ebs_options=es.CfnDomain.EBSOptionsProperty(
            ebs_enabled=True,
            iops=0,
            volume_size=20,
            volume_type="gp2"),
        snapshot_options=es.CfnDomain.SnapshotOptionsProperty(
            automated_snapshot_start_hour=0))
def __init__(self, scope: core.Construct, id: str, datalake: DataLakeLayer, **kwargs) -> None:
    """Elasticsearch cluster inside the data-lake VPC.

    Creates a security group, a 2-data-node / 3-master ES 7.9 domain with
    app and slow-index logging, and a service-linked role the domain needs
    to manage ENIs in the VPC.
    """
    super().__init__(scope, id, **kwargs)
    # self.encryption_key = kms.Key(
    #     self, 'EncryptionKey',
    #     removal_policy=core.RemovalPolicy.DESTROY,
    #     enable_key_rotation=True)

    # Open "es:*" statement; network placement (VPC) is the effective boundary.
    policy = iam.PolicyStatement(
        sid='Allow-by-IPAddress',
        actions=['es:*'],
        principals=[iam.AnyPrincipal()],
        resources=['*'])
    # Not supported with ES in Vpc mode.
    # policy.add_condition('IpAddress', {
    #     'aws:SourceIp': '74.102.88.0/24'
    # })

    self.security_group = ec2.SecurityGroup(
        self,
        'SecurityGroup',
        vpc=datalake.vpc,
        allow_all_outbound=True,
        description='Elastic Search Security Group')
    # NOTE(review): admits all traffic from any IPv4 source — the VPC is the
    # only effective boundary; confirm this is acceptable.
    self.security_group.add_ingress_rule(
        peer=ec2.Peer.any_ipv4(),
        connection=ec2.Port.all_traffic(),
        description='Allow all')

    self.search = es.Domain(
        self,
        'SearchCluster',
        version=es.ElasticsearchVersion.V7_9,
        enforce_https=True,
        node_to_node_encryption=True,
        capacity=es.CapacityConfig(
            master_nodes=3,
            #warm_nodes=len(self.vpc.availability_zones),
            data_nodes=2  # len(self.vpc.availability_zones),
        ),
        zone_awareness=es.ZoneAwarenessConfig(
            availability_zone_count=2  #len(self.vpc.availability_zones)
        ),
        # encryption_at_rest=es.EncryptionAtRestOptions(
        #     enabled=False,
        #     kms_key=self.encryption_key
        # ),
        vpc_options=es.VpcOptions(
            subnets=datalake.vpc.private_subnets,
            security_groups=[self.security_group]),
        logging=es.LoggingOptions(
            app_log_enabled=True,
            app_log_group=logs.LogGroup(
                self,
                'SearchAppLogGroup',
                removal_policy=core.RemovalPolicy.DESTROY,
                retention=logs.RetentionDays.ONE_MONTH),
            # NOTE(review): an audit log group is created although
            # audit_log_enabled is False — confirm this is intentional.
            audit_log_enabled=False,
            audit_log_group=logs.LogGroup(
                self,
                'SearchAuditLogs',
                removal_policy=core.RemovalPolicy.DESTROY,
                retention=logs.RetentionDays.ONE_MONTH),
            slow_index_log_enabled=True,
            slow_index_log_group=logs.LogGroup(
                self,
                'SearchSlowIndex',
                removal_policy=core.RemovalPolicy.DESTROY,
                retention=logs.RetentionDays.ONE_MONTH),
        ),
        access_policies=[policy])

    # Configure the LinkedServiceRole the domain needs to manage VPC ENIs;
    # the domain must not be created before the role exists.
    serviceLinkedRole = core.CfnResource(
        self,
        'LinkedServiceRole',
        type="AWS::IAM::ServiceLinkedRole",
        properties={
            'AWSServiceName': "es.amazonaws.com",
            'Description': "Role for ES to access resources in my VPC"
        })
    self.search.node.add_dependency(serviceLinkedRole)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """S3 access-log pipeline.

    Flow: access-log bucket -> SNS topic -> two SQS queues (each with a DLQ)
    -> two lambdas: one indexes into the Elasticsearch domain below (via a
    Kinesis Firehose delivery stream), the other forwards to Elastic Cloud.
    """
    super().__init__(scope, id, **kwargs)

    ###########################################################################
    # AWS SECRETS MANAGER - Templated secret (sample, disabled)
    ###########################################################################
    # templated_secret = aws_secretsmanager.Secret(self, "TemplatedSecret",
    #     generate_secret_string=aws_secretsmanager.SecretStringGenerator(
    #         secret_string_template="{\"username\":\"cleanbox\"}",
    #         generate_string_key="password"))

    ###########################################################################
    # CUSTOM CLOUDFORMATION RESOURCE (sample, disabled)
    ###########################################################################
    # customlambda = aws_lambda.Function(self, 'customconfig',
    #     handler='customconfig.on_event',
    #     runtime=aws_lambda.Runtime.PYTHON_3_7,
    #     code=aws_lambda.Code.asset('customconfig'))
    # customlambda_statement = aws_iam.PolicyStatement(
    #     actions=["events:PutRule"], resources=["*"])
    # customlambda.add_to_role_policy(statement=customlambda_statement)
    # my_provider = cr.Provider(self, "MyProvider",
    #     on_event_handler=customlambda,
    #     log_retention=logs.RetentionDays.SIX_MONTHS)
    # CustomResource(self, 'customconfigresource',
    #     service_token=my_provider.service_token)

    ###########################################################################
    # AWS LAMBDA FUNCTIONS
    ###########################################################################
    sqs_to_elastic_cloud = aws_lambda.Function(
        self,
        'sqs_to_elastic_cloud',
        handler='sqs_to_elastic_cloud.lambda_handler',
        runtime=aws_lambda.Runtime.PYTHON_3_7,
        code=aws_lambda.Code.asset('sqs_to_elastic_cloud'),
        memory_size=4096,
        timeout=core.Duration.seconds(300),
        log_retention=logs.RetentionDays.ONE_DAY)

    sqs_to_elasticsearch_service = aws_lambda.Function(
        self,
        'sqs_to_elasticsearch_service',
        handler='sqs_to_elasticsearch_service.lambda_handler',
        runtime=aws_lambda.Runtime.PYTHON_3_7,
        code=aws_lambda.Code.asset('sqs_to_elasticsearch_service'),
        memory_size=4096,
        timeout=core.Duration.seconds(300),
        log_retention=logs.RetentionDays.ONE_DAY)

    ###########################################################################
    # AMAZON S3 BUCKETS
    ###########################################################################
    access_log_bucket = aws_s3.Bucket(self, "access_log_bucket")
    kinesis_log_bucket = aws_s3.Bucket(self, "kinesis_log_bucket")

    ###########################################################################
    # LAMBDA SUPPLEMENTAL POLICIES
    ###########################################################################
    # Both lambdas read S3 objects and write to Firehose.
    lambda_supplemental_policy_statement = aws_iam.PolicyStatement(
        effect=aws_iam.Effect.ALLOW,
        actions=["s3:Get*", "s3:Head*", "s3:List*", "firehose:*"],
        resources=["*"])

    sqs_to_elastic_cloud.add_to_role_policy(
        lambda_supplemental_policy_statement)
    sqs_to_elasticsearch_service.add_to_role_policy(
        lambda_supplemental_policy_statement)

    ###########################################################################
    # AWS SNS TOPICS
    ###########################################################################
    access_log_topic = aws_sns.Topic(self, "access_log_topic")

    ###########################################################################
    # ADD AMAZON S3 BUCKET NOTIFICATIONS
    ###########################################################################
    access_log_bucket.add_event_notification(
        aws_s3.EventType.OBJECT_CREATED,
        aws_s3_notifications.SnsDestination(access_log_topic))

    ###########################################################################
    # AWS SQS QUEUES
    ###########################################################################
    # Each consumer queue gets a DLQ after 10 failed receives.  Visibility
    # timeout (301 s) must be >= the consuming lambda's timeout (300 s).
    sqs_to_elasticsearch_service_queue_iqueue = aws_sqs.Queue(
        self, "sqs_to_elasticsearch_service_queue_dlq")
    sqs_to_elasticsearch_service_queue_dlq = aws_sqs.DeadLetterQueue(
        max_receive_count=10,
        queue=sqs_to_elasticsearch_service_queue_iqueue)
    sqs_to_elasticsearch_service_queue = aws_sqs.Queue(
        self,
        "sqs_to_elasticsearch_service_queue",
        visibility_timeout=core.Duration.seconds(301),
        dead_letter_queue=sqs_to_elasticsearch_service_queue_dlq)

    sqs_to_elastic_cloud_queue_iqueue = aws_sqs.Queue(
        self, "sqs_to_elastic_cloud_queue_dlq")
    sqs_to_elastic_cloud_queue_dlq = aws_sqs.DeadLetterQueue(
        max_receive_count=10, queue=sqs_to_elastic_cloud_queue_iqueue)
    sqs_to_elastic_cloud_queue = aws_sqs.Queue(
        self,
        "sqs_to_elastic_cloud_queue",
        visibility_timeout=core.Duration.seconds(301),
        dead_letter_queue=sqs_to_elastic_cloud_queue_dlq)

    ###########################################################################
    # AWS SNS TOPIC SUBSCRIPTIONS
    ###########################################################################
    access_log_topic.add_subscription(
        aws_sns_subscriptions.SqsSubscription(sqs_to_elastic_cloud_queue))
    access_log_topic.add_subscription(
        aws_sns_subscriptions.SqsSubscription(
            sqs_to_elasticsearch_service_queue))

    ###########################################################################
    # AWS LAMBDA SQS EVENT SOURCE
    ###########################################################################
    sqs_to_elastic_cloud.add_event_source(
        SqsEventSource(sqs_to_elastic_cloud_queue, batch_size=10))
    sqs_to_elasticsearch_service.add_event_source(
        SqsEventSource(sqs_to_elasticsearch_service_queue, batch_size=10))

    ###########################################################################
    # AWS ELASTICSEARCH DOMAIN
    ###########################################################################
    # Placeholder account id; only used by the (disabled) access policy below.
    this_aws_account = aws_iam.AccountPrincipal(account_id="012345678912")
    # s3_to_elasticsearch_access_logs_domain_access_policy_statement = \
    #     aws_iam.PolicyStatement(
    #         principals=[this_aws_account],
    #         effect=aws_iam.Effect.ALLOW,
    #         actions=["es:*"],
    #         resources=["*"])

    s3_to_elasticsearch_access_logs_domain = aws_elasticsearch.Domain(
        self,
        "s3-to-elasticsearch-access-logs-domain",
        version=aws_elasticsearch.ElasticsearchVersion.V7_1,
        capacity={
            "master_nodes": 3,
            "data_nodes": 4
        },
        ebs={"volume_size": 100},
        zone_awareness={"availability_zone_count": 2},
        logging={
            "slow_search_log_enabled": True,
            "app_log_enabled": True,
            "slow_index_log_enabled": True
        })

    ###########################################################################
    # AMAZON COGNITO USER POOL
    ###########################################################################
    # Only the sign-in aliases are configured; everything else is left at the
    # service defaults.
    s3_to_elasticsearch_user_pool = aws_cognito.UserPool(
        self,
        "s3-to-elasticsearch-access-logs-pool",
        sign_in_aliases=aws_cognito.SignInAliases(email=True, username=True))

    ###########################################################################
    # AMAZON KINESIS FIREHOSE STREAM
    ###########################################################################
    kinesis_firehose_stream_role = aws_iam.Role(
        self,
        "BaseVPCIAMLogRole",
        assumed_by=aws_iam.ServicePrincipal('firehose.amazonaws.com'),
        inline_policies={
            "AllowLogAccess":
                aws_iam.PolicyDocument(
                    assign_sids=False,
                    statements=[
                        aws_iam.PolicyStatement(
                            actions=[
                                '*',
                                'es:*',
                                'logs:PutLogEvents',
                                'logs:DescribeLogGroups',
                                # was 'logs:DescribeLogsStreams' (typo)
                                'logs:DescribeLogStreams',
                            ],
                            # Effect('ALLOW') looked the enum up by value, but
                            # the member's value is 'Allow' -> ValueError.
                            effect=aws_iam.Effect.ALLOW,
                            resources=['*'])
                    ])
        })

    retry_options = (
        aws_kinesisfirehose.CfnDeliveryStream.ElasticsearchRetryOptionsProperty(
            duration_in_seconds=300))
    s3_configuration = (
        aws_kinesisfirehose.CfnDeliveryStream.S3DestinationConfigurationProperty(
            bucket_arn=kinesis_log_bucket.bucket_arn,
            role_arn=kinesis_firehose_stream_role.role_arn))
    es_destination_configuration = (
        aws_kinesisfirehose.CfnDeliveryStream
        .ElasticsearchDestinationConfigurationProperty(
            domain_arn=s3_to_elasticsearch_access_logs_domain.domain_arn,
            index_name="s3-to-elasticsearch-accesslogs",
            index_rotation_period="OneDay",
            retry_options=retry_options,
            role_arn=kinesis_firehose_stream_role.role_arn,
            s3_configuration=s3_configuration))

    kinesis_firehose_stream = aws_kinesisfirehose.CfnDeliveryStream(
        self,
        "kinesis_firehose_stream",
        elasticsearch_destination_configuration=es_destination_configuration)

    ###########################################################################
    # LAMBDA ENVIRONMENT
    ###########################################################################
    sqs_to_elasticsearch_service.add_environment(
        "FIREHOSE_NAME", kinesis_firehose_stream.ref)
    sqs_to_elasticsearch_service.add_environment(
        "QUEUEURL", sqs_to_elasticsearch_service_queue.queue_url)
    sqs_to_elasticsearch_service.add_environment("DEBUG", "False")

    # Elastic Cloud credentials are placeholders to be filled at deploy time.
    sqs_to_elastic_cloud.add_environment("ELASTICCLOUD_SECRET_NAME", "-")
    sqs_to_elastic_cloud.add_environment("ELASTIC_CLOUD_ID", "-")
    sqs_to_elastic_cloud.add_environment("ELASTIC_CLOUD_PASSWORD", "-")
    sqs_to_elastic_cloud.add_environment("ELASTIC_CLOUD_USERNAME", "-")
    sqs_to_elastic_cloud.add_environment(
        "QUEUEURL", sqs_to_elastic_cloud_queue.queue_url)
    sqs_to_elastic_cloud.add_environment("DEBUG", "False")
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """CloudTrail-log pipeline.

    Flow: CloudTrail trail -> log bucket -> SNS topic -> two SQS queues (each
    with a DLQ) -> two lambdas: one indexes into the Elasticsearch domain
    below, the other forwards to Elastic Cloud.
    """
    super().__init__(scope, id, **kwargs)

    ###########################################################################
    # AWS SECRETS MANAGER - Templated secret (sample, disabled)
    ###########################################################################
    # templated_secret = aws_secretsmanager.Secret(self, "TemplatedSecret",
    #     generate_secret_string=aws_secretsmanager.SecretStringGenerator(
    #         secret_string_template="{\"username\":\"cleanbox\"}",
    #         generate_string_key="password"))

    ###########################################################################
    # CUSTOM CLOUDFORMATION RESOURCE (sample, disabled)
    ###########################################################################
    # customlambda = aws_lambda.Function(self, 'customconfig',
    #     handler='customconfig.on_event',
    #     runtime=aws_lambda.Runtime.PYTHON_3_7,
    #     code=aws_lambda.Code.asset('customconfig'))
    # customlambda_statement = aws_iam.PolicyStatement(
    #     actions=["events:PutRule"], resources=["*"])
    # customlambda.add_to_role_policy(statement=customlambda_statement)
    # my_provider = cr.Provider(self, "MyProvider",
    #     on_event_handler=customlambda,
    #     log_retention=logs.RetentionDays.SIX_MONTHS)
    # CustomResource(self, 'customconfigresource',
    #     service_token=my_provider.service_token)

    ###########################################################################
    # AWS LAMBDA FUNCTIONS
    ###########################################################################
    sqs_to_elastic_cloud = aws_lambda.Function(
        self,
        'sqs_to_elastic_cloud',
        handler='sqs_to_elastic_cloud.lambda_handler',
        runtime=aws_lambda.Runtime.PYTHON_3_7,
        code=aws_lambda.Code.asset('sqs_to_elastic_cloud'),
        memory_size=4096,
        timeout=core.Duration.seconds(301),
        log_retention=logs.RetentionDays.ONE_DAY)

    sqs_to_elasticsearch_service = aws_lambda.Function(
        self,
        'sqs_to_elasticsearch_service',
        handler='sqs_to_elasticsearch_service.lambda_handler',
        runtime=aws_lambda.Runtime.PYTHON_3_7,
        code=aws_lambda.Code.asset('sqs_to_elasticsearch_service'),
        memory_size=4096,
        timeout=core.Duration.seconds(301),
        log_retention=logs.RetentionDays.ONE_DAY)

    ###########################################################################
    # AMAZON S3 BUCKETS
    ###########################################################################
    cloudtrail_log_bucket = aws_s3.Bucket(self, "cloudtrail_log_bucket")

    ###########################################################################
    # LAMBDA SUPPLEMENTAL POLICIES
    ###########################################################################
    lambda_supplemental_policy_statement = aws_iam.PolicyStatement(
        effect=aws_iam.Effect.ALLOW,
        actions=["s3:Get*", "s3:Head*", "s3:List*", "firehose:*", "es:*"],
        resources=["*"])

    sqs_to_elastic_cloud.add_to_role_policy(
        lambda_supplemental_policy_statement)
    sqs_to_elasticsearch_service.add_to_role_policy(
        lambda_supplemental_policy_statement)

    ###########################################################################
    # AWS SNS TOPICS
    ###########################################################################
    cloudtrail_log_topic = aws_sns.Topic(self, "cloudtrail_log_topic")

    ###########################################################################
    # ADD AMAZON S3 BUCKET NOTIFICATIONS
    ###########################################################################
    cloudtrail_log_bucket.add_event_notification(
        aws_s3.EventType.OBJECT_CREATED,
        aws_s3_notifications.SnsDestination(cloudtrail_log_topic))

    ###########################################################################
    # AWS SQS QUEUES
    ###########################################################################
    # Each consumer queue gets a DLQ after 10 failed receives.  Visibility
    # timeout was 300 s while the consuming lambdas time out at 301 s; Lambda
    # rejects an SQS event-source mapping whose queue visibility timeout is
    # below the function timeout, so the queues now use 301 s.
    sqs_to_elasticsearch_service_queue_iqueue = aws_sqs.Queue(
        self, "sqs_to_elasticsearch_service_queue_dlq")
    sqs_to_elasticsearch_service_queue_dlq = aws_sqs.DeadLetterQueue(
        max_receive_count=10,
        queue=sqs_to_elasticsearch_service_queue_iqueue)
    sqs_to_elasticsearch_service_queue = aws_sqs.Queue(
        self,
        "sqs_to_elasticsearch_service_queue",
        visibility_timeout=core.Duration.seconds(301),
        dead_letter_queue=sqs_to_elasticsearch_service_queue_dlq)

    sqs_to_elastic_cloud_queue_iqueue = aws_sqs.Queue(
        self, "sqs_to_elastic_cloud_queue_dlq")
    sqs_to_elastic_cloud_queue_dlq = aws_sqs.DeadLetterQueue(
        max_receive_count=10, queue=sqs_to_elastic_cloud_queue_iqueue)
    sqs_to_elastic_cloud_queue = aws_sqs.Queue(
        self,
        "sqs_to_elastic_cloud_queue",
        visibility_timeout=core.Duration.seconds(301),
        dead_letter_queue=sqs_to_elastic_cloud_queue_dlq)

    ###########################################################################
    # AWS SNS TOPIC SUBSCRIPTIONS
    ###########################################################################
    cloudtrail_log_topic.add_subscription(
        aws_sns_subscriptions.SqsSubscription(sqs_to_elastic_cloud_queue))
    cloudtrail_log_topic.add_subscription(
        aws_sns_subscriptions.SqsSubscription(
            sqs_to_elasticsearch_service_queue))

    ###########################################################################
    # AWS LAMBDA SQS EVENT SOURCE
    ###########################################################################
    sqs_to_elastic_cloud.add_event_source(
        SqsEventSource(sqs_to_elastic_cloud_queue, batch_size=10))
    sqs_to_elasticsearch_service.add_event_source(
        SqsEventSource(sqs_to_elasticsearch_service_queue, batch_size=10))

    ###########################################################################
    # AWS ELASTICSEARCH DOMAIN
    ###########################################################################
    # Placeholder account id; currently unused but kept for the access policy
    # that may be re-enabled later.
    this_aws_account = aws_iam.AccountPrincipal(account_id="012345678912")

    s3_to_elasticsearch_cloudtrail_logs_domain = aws_elasticsearch.Domain(
        self,
        "s3-to-elasticsearch-cloudtrail-logs-domain",
        version=aws_elasticsearch.ElasticsearchVersion.V7_1,
        capacity={
            "master_nodes": 3,
            "data_nodes": 4
        },
        ebs={"volume_size": 100},
        zone_awareness={"availability_zone_count": 2},
        logging={
            "slow_search_log_enabled": True,
            "app_log_enabled": True,
            "slow_index_log_enabled": True
        })

    ###########################################################################
    # AMAZON COGNITO USER POOL
    ###########################################################################
    # Only the sign-in aliases are configured; everything else is left at the
    # service defaults.
    s3_to_elasticsearch_user_pool = aws_cognito.UserPool(
        self,
        "s3-to-elasticsearch-cloudtrial-logs-pool",
        sign_in_aliases=aws_cognito.SignInAliases(email=True, username=True))

    ###########################################################################
    # LAMBDA ENVIRONMENT
    ###########################################################################
    sqs_to_elasticsearch_service.add_environment(
        "ELASTICSEARCH_HOST",
        s3_to_elasticsearch_cloudtrail_logs_domain.domain_endpoint)
    sqs_to_elasticsearch_service.add_environment(
        "QUEUEURL", sqs_to_elasticsearch_service_queue.queue_url)
    sqs_to_elasticsearch_service.add_environment("DEBUG", "False")

    # Elastic Cloud credentials are placeholders to be filled at deploy time.
    sqs_to_elastic_cloud.add_environment("ELASTICCLOUD_SECRET_NAME", "-")
    sqs_to_elastic_cloud.add_environment("ELASTIC_CLOUD_ID", "-")
    sqs_to_elastic_cloud.add_environment("ELASTIC_CLOUD_PASSWORD", "-")
    sqs_to_elastic_cloud.add_environment("ELASTIC_CLOUD_USERNAME", "-")
    sqs_to_elastic_cloud.add_environment(
        "QUEUEURL", sqs_to_elastic_cloud_queue.queue_url)
    sqs_to_elastic_cloud.add_environment("DEBUG", "False")

    ###########################################################################
    # AWS CLOUDTRAIL TRAIL  (header previously mislabeled "COGNITO USER POOL")
    ###########################################################################
    allevents_trail = aws_cloudtrail.Trail(
        self,
        "allevents_trail",
        bucket=cloudtrail_log_bucket,
        is_multi_region_trail=True,
        # ReadWriteType("ALL") looked the enum up by value, but the member's
        # value is 'All' -> ValueError; reference the member directly.
        management_events=aws_cloudtrail.ReadWriteType.ALL,
        send_to_cloud_watch_logs=False)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Provision the s3workflow stack.

    Wires together: three Lambda functions fed by an S3 bucket notification
    and SQS queues, an Elasticsearch domain used as the results store, an
    ECS/Fargate cluster running Rekognition and Comprehend worker tasks, and
    a private Route53 hosted zone.

    :param scope: parent construct (normally the CDK App).
    :param id: logical id of this stack.
    :param kwargs: forwarded to the base Stack (env, tags, ...).
    """
    super().__init__(scope, id, **kwargs)

    ###########################################################################
    # AWS LAMBDA FUNCTIONS
    ###########################################################################
    # NOTE(review): handlers are loaded from local asset directories named
    # after each function; aws_lambda.Code.asset is the deprecated spelling
    # of Code.from_asset but is kept as-is here.
    parse_image_list_file = aws_lambda.Function(
        self,
        'parse_image_list_file',
        handler='parse_image_list_file.lambda_handler',
        runtime=aws_lambda.Runtime.PYTHON_3_7,
        code=aws_lambda.Code.asset('parse_image_list_file'),
        memory_size=10240,  # max Lambda memory — presumably parses large list files
        timeout=core.Duration.seconds(300),
        log_retention=aws_logs.RetentionDays.ONE_DAY)

    list_objects = aws_lambda.Function(
        self,
        'list_objects',
        handler='list_objects.lambda_handler',
        runtime=aws_lambda.Runtime.PYTHON_3_7,
        code=aws_lambda.Code.asset('list_objects'),
        memory_size=4096,
        timeout=core.Duration.seconds(300),
        log_retention=aws_logs.RetentionDays.ONE_DAY)

    get_size_and_store = aws_lambda.Function(
        self,
        'get_size_and_store',
        handler='get_size_and_store.lambda_handler',
        runtime=aws_lambda.Runtime.PYTHON_3_7,
        code=aws_lambda.Code.asset('get_size_and_store'),
        memory_size=4096,
        timeout=core.Duration.seconds(300),
        log_retention=aws_logs.RetentionDays.ONE_DAY)

    ###########################################################################
    # AMAZON S3 BUCKETS
    ###########################################################################
    images_bucket = aws_s3.Bucket(self, "images_bucket")

    ###########################################################################
    # LAMBDA SUPPLEMENTAL POLICIES
    ###########################################################################
    # One shared statement granting S3 read, SQS and Elasticsearch access to
    # all three functions. NOTE(review): wildcard resources — acceptable for
    # a demo, tighten for production.
    lambda_supplemental_policy_statement = aws_iam.PolicyStatement(
        effect=aws_iam.Effect.ALLOW,
        actions=["s3:Get*", "s3:Head*", "s3:List*", "sqs:*", "es:*"],
        resources=["*"])

    parse_image_list_file.add_to_role_policy(
        lambda_supplemental_policy_statement)
    list_objects.add_to_role_policy(lambda_supplemental_policy_statement)
    get_size_and_store.add_to_role_policy(
        lambda_supplemental_policy_statement)

    ###########################################################################
    # AWS SNS TOPICS
    ###########################################################################
    # notification_topic = aws_sns.Topic(self, "notification_topic")

    ###########################################################################
    # ADD AMAZON S3 BUCKET NOTIFICATIONS
    ###########################################################################
    # Every object created in the images bucket triggers the parser Lambda.
    images_bucket.add_event_notification(
        aws_s3.EventType.OBJECT_CREATED,
        aws_s3_notifications.LambdaDestination(parse_image_list_file))

    ###########################################################################
    # AWS SQS QUEUES
    ###########################################################################
    # Each work queue gets a dead-letter queue after 10 failed receives.
    # Visibility timeout (301s) is kept just above the Lambda timeout (300s)
    # so an in-flight message cannot be redelivered while still processing.
    comprehend_queue_iqueue = aws_sqs.Queue(self, "comprehend_queue_iqueue")
    comprehend_queue_iqueue_dlq = aws_sqs.DeadLetterQueue(
        max_receive_count=10, queue=comprehend_queue_iqueue)
    comprehend_queue = aws_sqs.Queue(
        self,
        "comprehend_queue",
        visibility_timeout=core.Duration.seconds(301),
        dead_letter_queue=comprehend_queue_iqueue_dlq)

    rekognition_queue_iqueue = aws_sqs.Queue(self, "rekognition_queue_iqueue")
    rekognition_queue_dlq = aws_sqs.DeadLetterQueue(
        max_receive_count=10, queue=rekognition_queue_iqueue)
    rekognition_queue = aws_sqs.Queue(
        self,
        "rekognition_queue",
        visibility_timeout=core.Duration.seconds(301),
        dead_letter_queue=rekognition_queue_dlq)

    object_queue_iqueue = aws_sqs.Queue(self, "object_queue_iqueue")
    object_queue_dlq = aws_sqs.DeadLetterQueue(max_receive_count=10,
                                               queue=object_queue_iqueue)
    object_queue = aws_sqs.Queue(
        self,
        "object_queue",
        visibility_timeout=core.Duration.seconds(301),
        dead_letter_queue=object_queue_dlq)

    ###########################################################################
    # AWS LAMBDA SQS EVENT SOURCE
    ###########################################################################
    get_size_and_store.add_event_source(
        SqsEventSource(object_queue, batch_size=10))

    ###########################################################################
    # AWS ELASTICSEARCH DOMAIN
    ###########################################################################
    s3workflow_domain = aws_elasticsearch.Domain(
        self,
        "s3workflow_domain",
        version=aws_elasticsearch.ElasticsearchVersion.V7_1,
        capacity={
            "master_nodes": 3,
            "data_nodes": 4
        },
        ebs={"volume_size": 100},
        zone_awareness={"availability_zone_count": 2},
        logging={
            "slow_search_log_enabled": True,
            "app_log_enabled": True,
            "slow_index_log_enabled": True
        })

    ###########################################################################
    # AMAZON COGNITO USER POOL
    ###########################################################################
    # Sign-in by email or username; all other pool options left at their
    # defaults (explicit None keeps the full option surface visible).
    s3workflow_pool = aws_cognito.UserPool(
        self,
        "s3workflow-pool",
        account_recovery=None,
        auto_verify=None,
        custom_attributes=None,
        email_settings=None,
        enable_sms_role=None,
        lambda_triggers=None,
        mfa=None,
        mfa_second_factor=None,
        password_policy=None,
        self_sign_up_enabled=None,
        sign_in_aliases=aws_cognito.SignInAliases(email=True,
                                                  phone=None,
                                                  preferred_username=None,
                                                  username=True),
        sign_in_case_sensitive=None,
        sms_role=None,
        sms_role_external_id=None,
        standard_attributes=None,
        user_invitation=None,
        user_pool_name=None,
        user_verification=None)

    ###########################################################################
    # AMAZON VPC
    ###########################################################################
    vpc = aws_ec2.Vpc(self, "s3workflowVPC",
                      max_azs=3)  # default is all AZs in region

    ###########################################################################
    # AMAZON ECS CLUSTER
    ###########################################################################
    cluster = aws_ecs.Cluster(self, "s3", vpc=vpc)

    ###########################################################################
    # AMAZON ECS Repositories
    ###########################################################################
    rekognition_repository = aws_ecr.Repository(
        self,
        "rekognition_repository",
        image_scan_on_push=True,
        removal_policy=core.RemovalPolicy("DESTROY"))
    comprehend_repository = aws_ecr.Repository(
        self,
        "comprehend_repository",
        image_scan_on_push=True,
        removal_policy=core.RemovalPolicy("DESTROY"))

    ###########################################################################
    # AMAZON ECS Roles and Policies
    ###########################################################################
    # Execution role: permissions ECS itself needs (pull images, write logs).
    task_execution_policy_statement = aws_iam.PolicyStatement(
        effect=aws_iam.Effect.ALLOW,
        actions=[
            "logs:*", "ecs:*", "ec2:*", "elasticloadbalancing:*", "ecr:*"
        ],
        resources=["*"])
    task_execution_policy_document = aws_iam.PolicyDocument()
    task_execution_policy_document.add_statements(
        task_execution_policy_statement)
    task_execution_policy = aws_iam.Policy(
        self, "task_execution_policy",
        document=task_execution_policy_document)
    task_execution_role = aws_iam.Role(
        self,
        "task_execution_role",
        assumed_by=aws_iam.ServicePrincipal('ecs-tasks.amazonaws.com'))
    task_execution_role.attach_inline_policy(task_execution_policy)

    # Task role: permissions the application code in the containers needs.
    task_policy_statement = aws_iam.PolicyStatement(
        effect=aws_iam.Effect.ALLOW,
        actions=[
            "logs:*", "xray:*", "sqs:*", "s3:*", "rekognition:*",
            "comprehend:*", "es:*"
        ],
        resources=["*"])
    task_policy_document = aws_iam.PolicyDocument()
    task_policy_document.add_statements(task_policy_statement)
    task_policy = aws_iam.Policy(self, "task_policy",
                                 document=task_policy_document)
    task_role = aws_iam.Role(
        self,
        "task_role",
        assumed_by=aws_iam.ServicePrincipal('ecs-tasks.amazonaws.com'))
    task_role.attach_inline_policy(task_policy)

    ###########################################################################
    # AMAZON ECS Task definitions
    ###########################################################################
    rekognition_task_definition = aws_ecs.TaskDefinition(
        self,
        "rekognition_task_definition",
        compatibility=aws_ecs.Compatibility("FARGATE"),
        cpu="1024",
        # ipc_mode=None,
        memory_mib="2048",
        network_mode=aws_ecs.NetworkMode("AWS_VPC"),
        # pid_mode=None,  # Not supported in Fargate and Windows containers
        # placement_constraints=None,
        execution_role=task_execution_role,
        # family=None,
        # proxy_configuration=None,
        task_role=task_role
        # volumes=None
    )
    comprehend_task_definition = aws_ecs.TaskDefinition(
        self,
        "comprehend_task_definition",
        compatibility=aws_ecs.Compatibility("FARGATE"),
        cpu="1024",
        # ipc_mode=None,
        memory_mib="2048",
        network_mode=aws_ecs.NetworkMode("AWS_VPC"),
        # pid_mode=None,  # Not supported in Fargate and Windows containers
        # placement_constraints=None,
        execution_role=task_execution_role,
        # family=None,
        # proxy_configuration=None,
        task_role=task_role
        # volumes=None
    )

    ###########################################################################
    # AMAZON ECS Images
    ###########################################################################
    rekognition_ecr_image = aws_ecs.EcrImage(
        repository=rekognition_repository, tag="latest")
    comprehend_ecr_image = aws_ecs.EcrImage(
        repository=comprehend_repository, tag="latest")

    ###########################################################################
    # ENVIRONMENT VARIABLES
    ###########################################################################
    # Shared environment for both ECS containers.
    environment_variables = {}
    environment_variables["COMPREHEND_QUEUE"] = comprehend_queue.queue_url
    environment_variables[
        "REKOGNITION_QUEUE"] = rekognition_queue.queue_url
    environment_variables["IMAGES_BUCKET"] = images_bucket.bucket_name
    environment_variables[
        "ELASTICSEARCH_HOST"] = s3workflow_domain.domain_endpoint

    # Per-Lambda environment; "-" values are placeholders overwritten at
    # invocation time (presumably by the event payload — verify in handlers).
    parse_image_list_file.add_environment(
        "ELASTICSEARCH_HOST", s3workflow_domain.domain_endpoint)
    parse_image_list_file.add_environment("QUEUEURL",
                                          rekognition_queue.queue_url)
    parse_image_list_file.add_environment("DEBUG", "False")
    parse_image_list_file.add_environment("BUCKET", "-")
    parse_image_list_file.add_environment("KEY", "-")

    list_objects.add_environment("QUEUEURL", object_queue.queue_url)
    list_objects.add_environment("ELASTICSEARCH_HOST",
                                 s3workflow_domain.domain_endpoint)
    list_objects.add_environment("S3_BUCKET_NAME", images_bucket.bucket_name)
    list_objects.add_environment("S3_BUCKET_PREFIX", "images/")
    list_objects.add_environment("S3_BUCKET_SUFFIX", "")
    list_objects.add_environment("LOGGING_LEVEL", "INFO")

    get_size_and_store.add_environment("QUEUEURL", object_queue.queue_url)
    get_size_and_store.add_environment("ELASTICSEARCH_HOST",
                                       s3workflow_domain.domain_endpoint)
    get_size_and_store.add_environment("S3_BUCKET_NAME",
                                       images_bucket.bucket_name)
    get_size_and_store.add_environment("S3_BUCKET_PREFIX", "images/")
    get_size_and_store.add_environment("S3_BUCKET_SUFFIX", "")
    get_size_and_store.add_environment("LOGGING_LEVEL", "INFO")

    ###########################################################################
    # ECS Log Drivers
    ###########################################################################
    rekognition_task_log_driver = aws_ecs.LogDriver.aws_logs(
        stream_prefix="s3workflow",
        log_retention=aws_logs.RetentionDays("ONE_DAY"))
    comprehend_task_log_driver = aws_ecs.LogDriver.aws_logs(
        stream_prefix="s3workflow",
        log_retention=aws_logs.RetentionDays("ONE_DAY"))

    ###########################################################################
    # ECS Task Definitions — container registration
    ###########################################################################
    rekognition_task_definition.add_container(
        "rekognition_task_definition",
        image=rekognition_ecr_image,
        memory_reservation_mib=1024,
        environment=environment_variables,
        logging=rekognition_task_log_driver)
    comprehend_task_definition.add_container(
        "comprehend_task_definition",
        image=comprehend_ecr_image,
        memory_reservation_mib=1024,
        environment=environment_variables,
        logging=comprehend_task_log_driver)

    ###########################################################################
    # AWS ROUTE53 HOSTED ZONE
    ###########################################################################
    # Private zone — only resolvable from the VPC attached below.
    hosted_zone = aws_route53.HostedZone(
        self,
        "hosted_zone",
        zone_name="s3workflow.com",
        comment="private hosted zone for s3workflow system")
    hosted_zone.add_vpc(vpc)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Provision an EKS cluster with its supporting controllers.

    Creates a dedicated VPC, a bastion IAM role mapped to the cluster's
    masters, and the cluster itself, then installs via Helm: the AWS Load
    Balancer Controller, External DNS, kubernetes-external-secrets, Flux,
    kube-prometheus-stack, and a fluent-bit -> Elasticsearch logging
    pipeline (each with an IRSA-mapped service account where needed).

    :param scope: parent construct (normally the CDK App).
    :param id: logical id of this stack.
    :param kwargs: forwarded to the base Stack (env, tags, ...).
    """
    super().__init__(scope, id, **kwargs)

    eks_vpc = ec2.Vpc(
        self, "VPC",
        cidr="10.0.0.0/16"
    )

    # Create IAM Role For EC2 bastion instance to be able to manage the cluster
    bastion_role = iam.Role(
        self, "BastionRole",
        assumed_by=iam.CompositePrincipal(
            iam.ServicePrincipal("ec2.amazonaws.com"),
            iam.AccountRootPrincipal()
        )
    )
    # Exposed so other stacks can grant this role further permissions.
    self.bastion_role = bastion_role

    # Create EC2 Instance Profile for that Role
    instance_profile = iam.CfnInstanceProfile(
        self, "InstanceProfile",
        roles=[bastion_role.role_name]
    )

    # Create SecurityGroup for the Control Plane ENIs
    eks_security_group = ec2.SecurityGroup(
        self, "EKSSecurityGroup",
        vpc=eks_vpc,
        allow_all_outbound=True
    )
    # Allow all traffic from within the VPC CIDR to the control plane.
    eks_security_group.add_ingress_rule(
        ec2.Peer.ipv4('10.0.0.0/16'),
        ec2.Port.all_traffic()
    )

    # Create an EKS Cluster
    eks_cluster = eks.Cluster(
        self, "cluster",
        vpc=eks_vpc,
        masters_role=bastion_role,
        default_capacity_type=eks.DefaultCapacityType.NODEGROUP,
        default_capacity_instance=ec2.InstanceType("m5.large"),
        default_capacity=2,
        security_group=eks_security_group,
        endpoint_access=eks.EndpointAccess.PUBLIC_AND_PRIVATE,
        version=eks.KubernetesVersion.V1_18
    )

    # Deploy ALB Ingress Controller
    # Create the k8s Service account and corresponding IAM Role mapped via IRSA
    alb_service_account = eks_cluster.add_service_account(
        "aws-load-balancer-controller",
        name="aws-load-balancer-controller",
        namespace="kube-system"
    )

    # Create the PolicyStatements to attach to the role
    # I couldn't find a way to get this to work with a PolicyDocument and there are 10 of these
    alb_policy_statement_json_1 = {
        "Effect": "Allow",
        "Action": [
            "acm:DescribeCertificate",
            "acm:ListCertificates",
            "acm:GetCertificate"
        ],
        "Resource": "*"
    }
    alb_policy_statement_json_2 = {
        "Effect": "Allow",
        "Action": [
            "ec2:AuthorizeSecurityGroupIngress",
            "ec2:CreateSecurityGroup",
            "ec2:CreateTags",
            "ec2:DeleteTags",
            "ec2:DeleteSecurityGroup",
            "ec2:DescribeAccountAttributes",
            "ec2:DescribeAddresses",
            "ec2:DescribeInstances",
            "ec2:DescribeInstanceStatus",
            "ec2:DescribeInternetGateways",
            "ec2:DescribeNetworkInterfaces",
            "ec2:DescribeSecurityGroups",
            "ec2:DescribeSubnets",
            "ec2:DescribeTags",
            "ec2:DescribeVpcs",
            "ec2:ModifyInstanceAttribute",
            "ec2:ModifyNetworkInterfaceAttribute",
            "ec2:RevokeSecurityGroupIngress"
        ],
        "Resource": "*"
    }
    alb_policy_statement_json_3 = {
        "Effect": "Allow",
        "Action": [
            "elasticloadbalancing:AddListenerCertificates",
            "elasticloadbalancing:AddTags",
            "elasticloadbalancing:CreateListener",
            "elasticloadbalancing:CreateLoadBalancer",
            "elasticloadbalancing:CreateRule",
            "elasticloadbalancing:CreateTargetGroup",
            "elasticloadbalancing:DeleteListener",
            "elasticloadbalancing:DeleteLoadBalancer",
            "elasticloadbalancing:DeleteRule",
            "elasticloadbalancing:DeleteTargetGroup",
            "elasticloadbalancing:DeregisterTargets",
            "elasticloadbalancing:DescribeListenerCertificates",
            "elasticloadbalancing:DescribeListeners",
            "elasticloadbalancing:DescribeLoadBalancers",
            "elasticloadbalancing:DescribeLoadBalancerAttributes",
            "elasticloadbalancing:DescribeRules",
            "elasticloadbalancing:DescribeSSLPolicies",
            "elasticloadbalancing:DescribeTags",
            "elasticloadbalancing:DescribeTargetGroups",
            "elasticloadbalancing:DescribeTargetGroupAttributes",
            "elasticloadbalancing:DescribeTargetHealth",
            "elasticloadbalancing:ModifyListener",
            "elasticloadbalancing:ModifyLoadBalancerAttributes",
            "elasticloadbalancing:ModifyRule",
            "elasticloadbalancing:ModifyTargetGroup",
            "elasticloadbalancing:ModifyTargetGroupAttributes",
            "elasticloadbalancing:RegisterTargets",
            "elasticloadbalancing:RemoveListenerCertificates",
            "elasticloadbalancing:RemoveTags",
            "elasticloadbalancing:SetIpAddressType",
            "elasticloadbalancing:SetSecurityGroups",
            "elasticloadbalancing:SetSubnets",
            "elasticloadbalancing:SetWebAcl"
        ],
        "Resource": "*"
    }
    alb_policy_statement_json_4 = {
        "Effect": "Allow",
        "Action": [
            "iam:CreateServiceLinkedRole",
            "iam:GetServerCertificate",
            "iam:ListServerCertificates"
        ],
        "Resource": "*"
    }
    alb_policy_statement_json_5 = {
        "Effect": "Allow",
        "Action": [
            "cognito-idp:DescribeUserPoolClient"
        ],
        "Resource": "*"
    }
    alb_policy_statement_json_6 = {
        "Effect": "Allow",
        "Action": [
            "waf-regional:GetWebACLForResource",
            "waf-regional:GetWebACL",
            "waf-regional:AssociateWebACL",
            "waf-regional:DisassociateWebACL"
        ],
        "Resource": "*"
    }
    alb_policy_statement_json_7 = {
        "Effect": "Allow",
        "Action": [
            "tag:GetResources",
            "tag:TagResources"
        ],
        "Resource": "*"
    }
    alb_policy_statement_json_8 = {
        "Effect": "Allow",
        "Action": [
            "waf:GetWebACL"
        ],
        "Resource": "*"
    }
    alb_policy_statement_json_9 = {
        "Effect": "Allow",
        "Action": [
            "wafv2:GetWebACL",
            "wafv2:GetWebACLForResource",
            "wafv2:AssociateWebACL",
            "wafv2:DisassociateWebACL"
        ],
        "Resource": "*"
    }
    alb_policy_statement_json_10 = {
        "Effect": "Allow",
        "Action": [
            "shield:DescribeProtection",
            "shield:GetSubscriptionState",
            "shield:DeleteProtection",
            "shield:CreateProtection",
            "shield:DescribeSubscription",
            "shield:ListProtections"
        ],
        "Resource": "*"
    }

    # Attach the necessary permissions
    alb_service_account.add_to_policy(iam.PolicyStatement.from_json(alb_policy_statement_json_1))
    alb_service_account.add_to_policy(iam.PolicyStatement.from_json(alb_policy_statement_json_2))
    alb_service_account.add_to_policy(iam.PolicyStatement.from_json(alb_policy_statement_json_3))
    alb_service_account.add_to_policy(iam.PolicyStatement.from_json(alb_policy_statement_json_4))
    alb_service_account.add_to_policy(iam.PolicyStatement.from_json(alb_policy_statement_json_5))
    alb_service_account.add_to_policy(iam.PolicyStatement.from_json(alb_policy_statement_json_6))
    alb_service_account.add_to_policy(iam.PolicyStatement.from_json(alb_policy_statement_json_7))
    alb_service_account.add_to_policy(iam.PolicyStatement.from_json(alb_policy_statement_json_8))
    alb_service_account.add_to_policy(iam.PolicyStatement.from_json(alb_policy_statement_json_9))
    alb_service_account.add_to_policy(iam.PolicyStatement.from_json(alb_policy_statement_json_10))

    # Deploy the ALB Ingress Controller from the Helm chart
    eks_cluster.add_helm_chart(
        "aws-load-balancer-controller",
        chart="aws-load-balancer-controller",
        repository="https://aws.github.io/eks-charts",
        namespace="kube-system",
        values={
            "clusterName": eks_cluster.cluster_name,
            "region": self.region,
            "vpcId": eks_vpc.vpc_id,
            "serviceAccount": {
                "create": False,
                "name": "aws-load-balancer-controller"
            }
        }
    )

    # Deploy External DNS Controller
    # Create the k8s Service account and corresponding IAM Role mapped via IRSA
    externaldns_service_account = eks_cluster.add_service_account(
        "external-dns",
        name="external-dns",
        namespace="kube-system"
    )

    # Create the PolicyStatements to attach to the role
    externaldns_policy_statement_json_1 = {
        "Effect": "Allow",
        "Action": [
            "route53:ChangeResourceRecordSets"
        ],
        "Resource": [
            "arn:aws:route53:::hostedzone/*"
        ]
    }
    externaldns_policy_statement_json_2 = {
        "Effect": "Allow",
        "Action": [
            "route53:ListHostedZones",
            "route53:ListResourceRecordSets"
        ],
        "Resource": [
            "*"
        ]
    }

    # Add the policies to the service account
    externaldns_service_account.add_to_policy(iam.PolicyStatement.from_json(externaldns_policy_statement_json_1))
    externaldns_service_account.add_to_policy(iam.PolicyStatement.from_json(externaldns_policy_statement_json_2))

    # Deploy the Helm Chart
    eks_cluster.add_helm_chart(
        "external-dns",
        chart="external-dns",
        repository="https://charts.bitnami.com/bitnami",
        namespace="kube-system",
        values={
            "provider": "aws",
            "aws": {
                "region": self.region
            },
            "serviceAccount": {
                "create": False,
                "name": "external-dns"
            },
            "podSecurityContext": {
                "fsGroup": 65534
            }
        }
    )

    # Install external secrets controller
    # Create the Service Account
    externalsecrets_service_account = eks_cluster.add_service_account(
        "kubernetes-external-secrets",
        name="kubernetes-external-secrets",
        namespace="kube-system"
    )

    # Define the policy in JSON
    externalsecrets_policy_statement_json_1 = {
        "Effect": "Allow",
        "Action": [
            "secretsmanager:GetResourcePolicy",
            "secretsmanager:GetSecretValue",
            "secretsmanager:DescribeSecret",
            "secretsmanager:ListSecretVersionIds"
        ],
        "Resource": [
            "*"
        ]
    }

    # Add the policies to the service account
    externalsecrets_service_account.add_to_policy(iam.PolicyStatement.from_json(externalsecrets_policy_statement_json_1))

    # Deploy the Helm Chart
    eks_cluster.add_helm_chart(
        "external-secrets",
        chart="kubernetes-external-secrets",
        repository="https://external-secrets.github.io/kubernetes-external-secrets/",
        namespace="kube-system",
        values={
            "env": {
                "AWS_REGION": self.region
            },
            "serviceAccount": {
                "name": "kubernetes-external-secrets",
                "create": False
            },
            "securityContext": {
                "fsGroup": 65534
            }
        }
    )

    # Deploy Flux
    # Deploy the Helm Chart
    eks_cluster.add_helm_chart(
        "flux",
        chart="flux",
        repository="https://charts.fluxcd.io",
        namespace="kube-system",
        values={
            "git": {
                "url": "[email protected]:jasonumiker/k8s-plus-aws-gitops",
                "path": "k8s-app-resources",
                "branch": "master"
            }
        }
    )

    # Deploy Prometheus and Grafana
    # TODO Replace this with the new AWS Managed Prometheus and Grafana when available
    eks_cluster.add_helm_chart(
        "metrics",
        chart="kube-prometheus-stack",
        repository="https://prometheus-community.github.io/helm-charts",
        namespace="monitoring",
        values={
            "prometheus": {
                "prometheusSpec": {
                    "storageSpec": {
                        "volumeClaimTemplate": {
                            "spec": {
                                "accessModes": [
                                    "ReadWriteOnce"
                                ],
                                "resources": {
                                    "requests": {
                                        "storage": "8Gi"
                                    }
                                },
                                "storageClassName": "gp2"
                            }
                        }
                    }
                }
            },
            "alertmanager": {
                "alertmanagerSpec": {
                    "storage": {
                        "volumeClaimTemplate": {
                            "spec": {
                                "accessModes": [
                                    "ReadWriteOnce"
                                ],
                                "resources": {
                                    "requests": {
                                        "storage": "2Gi"
                                    }
                                },
                                "storageClassName": "gp2"
                            }
                        }
                    }
                }
            },
            "grafana": {
                "persistence": {
                    "enabled": "true",
                    "storageClassName": "gp2"
                }
            }
        }
    )

    # Deploy Fluentbit and Elasticsearch
    # Deploy an ElasticSearch Domain
    es_domain = es.Domain(
        self, "ESDomain",
        version=es.ElasticsearchVersion.V7_9
    )

    # Create the Service Account
    fluentbit_service_account = eks_cluster.add_service_account(
        "fluentbit",
        name="fluentbit",
        namespace="monitoring"
    )

    # Define the policy in JSON
    fluentbit_policy_statement_json_1 = {
        "Effect": "Allow",
        "Action": [
            "es:ESHttp*"
        ],
        "Resource": [
            es_domain.domain_arn
        ]
    }

    # Add the policy to the service account.
    # FIX: this previously attached externalsecrets_policy_statement_json_1
    # (Secrets Manager permissions) by copy-paste mistake, leaving the
    # fluentbit statement defined above unused and the service account
    # without es:ESHttp* access to the domain.
    fluentbit_service_account.add_to_policy(iam.PolicyStatement.from_json(fluentbit_policy_statement_json_1))

    # Grant fluentbit access to our ES Domain
    es_domain.grant_write(fluentbit_service_account)

    eks_cluster.add_helm_chart(
        "fluent-bit",
        chart="fluent-bit",
        repository="https://fluent.github.io/helm-charts",
        namespace="monitoring",
        values={
            "serviceAccount": {
                "create": False,
                "name": "fluentbit"
            },
            "config": {
                "outputs": "[OUTPUT]\n    Name es\n    Match *\n    Host "+es_domain.domain_endpoint+"\n    Port 443\n    TLS On\n    AWS_Auth On\n    AWS_Region "+self.region+"\n    Retry_Limit 6\n",
            }
        }
    )
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    """Provision a test stack: an AwsCustomResource that lists S3 buckets,
    a VPC with a security group for lambdas, and a VPC-scoped
    Elasticsearch domain with full slow/app logging enabled.

    :param scope: parent construct (normally the CDK App).
    :param construct_id: logical id of this stack.
    :param kwargs: forwarded to the base Stack (env, tags, ...).
    """
    super().__init__(scope, construct_id, **kwargs)

    # Execution role for the custom-resource Lambda; iam:PassRole on * is
    # required so the SDK call can be made with this role.
    # NOTE(review): resources=['*'] is broad — tighten for production.
    role = iam.Role(
        scope=self,
        id='AwsCustomResourceRole',
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'))
    role.add_to_policy(
        iam.PolicyStatement(actions=['iam:PassRole'], resources=['*']))

    # Custom resource that calls s3:ListBuckets once at create time; the
    # fixed physical id means updates never re-run the call.
    my_custom_resource = cr.AwsCustomResource(
        scope=self,
        id='MyAwsCustomResource',
        role=role,
        policy=cr.AwsCustomResourcePolicy.from_sdk_calls(resources=['*']),
        on_create=cr.AwsSdkCall(
            action='listBuckets',
            service='s3',
            physical_resource_id=cr.PhysicalResourceId.of('BucketsList'),
        ))

    # Project-local VPC wrapper; exposes lambdas_sg and audit_vpc used below.
    vpc = VPCConstruct(self, id_='test-vpc', num_of_azs=2)
    security_group = SecurityGroup(
        self,
        id='test-security-group',
        vpc=vpc,
        security_group_name='test-security-group')
    # Allow HTTPS from the lambdas' security group into the ES domain.
    security_group.add_ingress_rule(connection=Port.tcp(443),
                                    peer=vpc.lambdas_sg)

    # Elasticsearch domain placed in the VPC's private subnets, with
    # dedicated log groups (destroyed with the stack) for app/slow logs.
    # NOTE(review): TLS_1_0 is the weakest policy despite enforce_https=True;
    # consider TLS_1_2 — confirm no legacy clients depend on it.
    domain = es.Domain(
        scope=self,
        id='Domain',
        version=es.ElasticsearchVersion.V7_9,
        domain_name="es-domain-name",
        enable_version_upgrade=False,
        enforce_https=True,
        fine_grained_access_control=None,
        node_to_node_encryption=True,
        tls_security_policy=es.TLSSecurityPolicy.TLS_1_0,
        logging=es.LoggingOptions(
            app_log_enabled=True,
            slow_index_log_enabled=True,
            slow_search_log_enabled=True,
            app_log_group=LogGroup(
                scope=self,
                id="app-log-group",
                log_group_name=f'/aws/aes/domains/esdomain/app-log-group',
                removal_policy=core.RemovalPolicy.DESTROY),
            slow_index_log_group=LogGroup(
                scope=self,
                id="slow-index-log-group",
                log_group_name=
                f'/aws/aes/domains/esdomain/slow-index-log-group',
                removal_policy=core.RemovalPolicy.DESTROY),
            slow_search_log_group=LogGroup(
                scope=self,
                id="slow-search-log-group",
                log_group_name=
                f'/aws/aes/domains/esdomain/slow-search-log-group',
                removal_policy=core.RemovalPolicy.DESTROY)),
        removal_policy=core.RemovalPolicy.DESTROY,
        zone_awareness=es.ZoneAwarenessConfig(availability_zone_count=2,
                                              enabled=True),
        vpc_options=es.VpcOptions(
            security_groups=[security_group],
            subnets=vpc.audit_vpc.select_subnets(
                subnet_group_name=PRIVATE_SUBNET_GROUP).subnets))
def __init__(self, scope: core.Construct, id: str, VPC: ec2.Vpc, **kwargs) -> None:
    """Provision the statement-demo Elasticsearch domain.

    Creates per-log-type CloudWatch log groups, a Secrets Manager secret for
    the master user password, and an encrypted, HTTPS-only ES 7.7 domain
    inside the given VPC. Capacity is driven by the CDK context key
    "elastic" (instanceType, instanceCount, master.{dedicated,type,count}).

    :param scope: parent construct (normally the CDK App).
    :param id: logical id of this stack.
    :param VPC: existing VPC whose private subnets host the domain.
    :param kwargs: forwarded to the base Stack (env, tags, ...).
    """
    super().__init__(scope, id, **kwargs)

    # Open es:* access policy (any principal). HTTPS + basic auth (below)
    # is the effective gate. NOTE(review): broad by design for a demo.
    elastic_policy = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        actions=[
            "es:*",
        ],
        resources=["*"],
    )
    elastic_policy.add_any_principal()
    elastic_document = iam.PolicyDocument()
    elastic_document.add_statements(elastic_policy)

    # One short-retention log group per ES log type, destroyed with the stack.
    appLog = logs.LogGroup(self,
                           "appLog",
                           log_group_name="/statement-demo/es/app",
                           removal_policy=core.RemovalPolicy.DESTROY,
                           retention=logs.RetentionDays.ONE_WEEK)
    searchLog = logs.LogGroup(self,
                              "searchLog",
                              log_group_name="/statement-demo/es/search",
                              removal_policy=core.RemovalPolicy.DESTROY,
                              retention=logs.RetentionDays.ONE_WEEK)
    indexLog = logs.LogGroup(self,
                             "indexLog",
                             log_group_name="/statement-demo/es/index",
                             removal_policy=core.RemovalPolicy.DESTROY,
                             retention=logs.RetentionDays.ONE_WEEK)
    # auditLog is created but not wired into LoggingOptions below
    # (audit logs require fine-grained access control configuration).
    auditLog = logs.LogGroup(self,
                             "auditLog",
                             log_group_name="/statement-demo/es/audit",
                             removal_policy=core.RemovalPolicy.DESTROY,
                             retention=logs.RetentionDays.ONE_WEEK)

    # Generated master-user password; excluded characters avoid those
    # rejected by the ES basic-auth password rules.
    self.secret = sm.Secret(
        self,
        "masterUserSecret",
        generate_secret_string=sm.SecretStringGenerator(
            password_length=8,
            exclude_characters="\"#$%&'()*+,./:;<=>?[\\]^_`{|}~",
        ),
        removal_policy=core.RemovalPolicy.DESTROY,
    )

    # Dedicated masters only when context says master.dedicated is truthy;
    # EBS is disabled on i3* instance types (they use local NVMe storage).
    # NOTE(review): vpc_options passes security_groups=[] — confirm the
    # intended security group is attached elsewhere or add one here.
    self.domain = es.Domain(
        self,
        "elastic_domain",
        version=es.ElasticsearchVersion.V7_7,
        access_policies=[elastic_policy],
        advanced_options=None,
        capacity=es.CapacityConfig(
            data_node_instance_type=self.node.try_get_context(
                "elastic")['instanceType'],
            data_nodes=self.node.try_get_context(
                "elastic")['instanceCount'],
            master_node_instance_type=self.node.try_get_context(
                "elastic")['master']['type']
            if self.node.try_get_context("elastic")['master']['dedicated']
            else None,
            master_nodes=self.node.try_get_context(
                "elastic")['master']['count']
            if self.node.try_get_context("elastic")['master']['dedicated']
            else None),
        domain_name="statement-demo",
        ebs=es.EbsOptions(enabled=(not self.node.try_get_context("elastic")
                                   ['instanceType'].startswith("i3"))),
        encryption_at_rest=es.EncryptionAtRestOptions(enabled=True),
        enforce_https=True,
        fine_grained_access_control=es.AdvancedSecurityOptions(
            master_user_name="admin",
            master_user_password=self.secret.secret_value),
        logging=es.LoggingOptions(app_log_enabled=True,
                                  app_log_group=appLog,
                                  slow_index_log_enabled=True,
                                  slow_index_log_group=indexLog,
                                  slow_search_log_enabled=True,
                                  slow_search_log_group=searchLog),
        node_to_node_encryption=True,
        tls_security_policy=es.TLSSecurityPolicy.TLS_1_2,
        use_unsigned_basic_auth=True,
        vpc_options=es.VpcOptions(security_groups=[],
                                  subnets=VPC.private_subnets),
        zone_awareness=es.ZoneAwarenessConfig(availability_zone_count=3,
                                              enabled=True))