def create_efs_volume(self, name):
    # Create an EFS filesystem and access point.
    fs = efs.FileSystem(
        self,
        name + '-fs',
        vpc=self.vpc,
        performance_mode=efs.PerformanceMode.GENERAL_PURPOSE,
        throughput_mode=efs.ThroughputMode.BURSTING,
        security_group=self.efs_sg)
    fs.add_access_point(name, path="/")

    # Define an ECS volume for this filesystem.
    volume = ecs.Volume(
        name=name + '-volume',
        efs_volume_configuration=ecs.EfsVolumeConfiguration(
            file_system_id=fs.file_system_id))
    return volume
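# Usage sketch (an assumption, not from the original source): inside the same
# construct's __init__, attach the returned volume to a Fargate task definition
# and mount it. "ExampleTask", "app", and "/mnt/data" are hypothetical names.
data_volume = self.create_efs_volume('data')
task = ecs.FargateTaskDefinition(self, 'ExampleTask', volumes=[data_volume])
app_container = task.add_container(
    'app', image=ecs.ContainerImage.from_registry('amazonlinux:2'))
app_container.add_mount_points(
    ecs.MountPoint(container_path='/mnt/data',
                   source_volume=data_volume.name,
                   read_only=False))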
def __init__(self, scope: core.Construct, config: dict, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    if 'vpc_id' in config:
        vpc = ec2.Vpc.from_lookup(self, "ECS-VPC", vpc_id=config["vpc_id"])
    else:
        vpc = None

    cluster = ecs.Cluster(self,
                          "commvault",
                          cluster_name="commvault-cs",
                          container_insights=True,
                          vpc=vpc)

    ### Create demo bucket
    bucket = s3.Bucket(self,
                       "commvault-bucket",
                       bucket_name="commvault-demo-bucket-{}-{}".format(
                           config["region"], config["account"]))

    ### This will allow the ALB to generate a certificate.
    domain_zone = route53.HostedZone.from_lookup(
        self, "walkerzone", domain_name="code.awalker.dev")

    ### Create EFS
    # kms_key = kms.Key(self, "comm-vault-key")
    commvault_file_system = efs.FileSystem(
        self,
        "comvault-efs",
        vpc=cluster.vpc,
        file_system_name="commvault-efs",
        encrypted=True,
        # kms_key=kms_key,
    )
    # kms_key.grant_encrypt_decrypt(commvault_file_system.)

    ### Define the task definition and add the container
    ecs_task = ecs.FargateTaskDefinition(self, "commvault-task")
    ecs_task.add_container(
        "commvault-container",
        image=ecs.ContainerImage.from_registry(
            "store/commvaultrepo/mediaagent:SP7"),
        essential=True,
        command=[
            "-csclientname", "filesys", "-cshost", "-mountpath",
            '"/opt/libraryPath"', "-cvdport", "8600", "-clienthost",
            "-clientname", "dockermediaagent"
        ],
        logging=ecs.LogDrivers.aws_logs(
            stream_prefix="commvault")).add_port_mappings(
                ecs.PortMapping(container_port=80,
                                host_port=80,
                                protocol=ecs.Protocol.TCP))

    ecs_task.add_to_task_role_policy(statement=iam.PolicyStatement(
        actions=["efs:*"], resources=['*'], effect=iam.Effect.ALLOW))

    ### Create the ECS service using the ApplicationLoadBalancedFargateService pattern.
    ecs_service = ecs_patterns.ApplicationLoadBalancedFargateService(
        self,
        "commvault-service",
        assign_public_ip=False,
        cluster=cluster,
        task_definition=ecs_task,
        protocol=elbv2.ApplicationProtocol.HTTPS,
        redirect_http=True,
        domain_name="commvault.code.awalker.dev",
        domain_zone=domain_zone,
        platform_version=ecs.FargatePlatformVersion.VERSION1_4,
        public_load_balancer=False)

    ### Grant read/write on the S3 bucket to the task role.
    bucket.grant_read_write(ecs_service.task_definition.task_role)

    # -v $TMPDIR/CommvaultLogs:/var/log/commvault/Log_Files
    ecs_task.add_volume(
        name="CommvaultLogs",
        efs_volume_configuration=ecs.EfsVolumeConfiguration(
            file_system_id=commvault_file_system.file_system_id,
            transit_encryption="ENABLED",
            authorization_config=ecs.AuthorizationConfig(
                # iam="ENABLED",
                access_point_id=efs.AccessPoint(
                    self,
                    "CommvaultLog-access-point",
                    path="/CommvaultLogs",
                    file_system=commvault_file_system).access_point_id)))
    ecs_task.default_container.add_mount_points(
        ecs.MountPoint(container_path="/var/log/commvault/Log_Files",
                       source_volume="CommvaultLogs",
                       read_only=False))

    # -v $TMPDIR/CommvaultRegistry/:/etc/CommVaultRegistry
    ecs_task.add_volume(
        name="CommVaultRegistry",
        efs_volume_configuration=ecs.EfsVolumeConfiguration(
            file_system_id=commvault_file_system.file_system_id,
            transit_encryption="ENABLED",
            authorization_config=ecs.AuthorizationConfig(
                # iam="ENABLED",
                access_point_id=efs.AccessPoint(
                    self,
                    "CommVaultRegistrys-access-point",
                    path="/CommVaultRegistry",
                    file_system=commvault_file_system).access_point_id)))
    ecs_task.default_container.add_mount_points(
        ecs.MountPoint(container_path="/etc/CommVaultRegistry",
                       source_volume="CommVaultRegistry",
                       read_only=False))

    # -v $TMPDIR/libraryPath/:/opt/libraryPath
    ecs_task.add_volume(
        name="libraryPath",
        efs_volume_configuration=ecs.EfsVolumeConfiguration(
            file_system_id=commvault_file_system.file_system_id,
            transit_encryption="ENABLED",
            authorization_config=ecs.AuthorizationConfig(
                # iam="ENABLED",
                access_point_id=efs.AccessPoint(
                    self,
                    "libraryPath-access-point",
                    path="/libraryPath",
                    file_system=commvault_file_system).access_point_id)))
    ecs_task.default_container.add_mount_points(
        ecs.MountPoint(container_path="/opt/libraryPath",
                       source_volume="libraryPath",
                       read_only=False))

    # -v $TMPDIR/IndexCache/:/opt/IndexCache
    ecs_task.add_volume(
        name="IndexCache",
        efs_volume_configuration=ecs.EfsVolumeConfiguration(
            file_system_id=commvault_file_system.file_system_id,
            transit_encryption="ENABLED",
            authorization_config=ecs.AuthorizationConfig(
                # iam="ENABLED",
                access_point_id=efs.AccessPoint(
                    self,
                    "IndexCache-access-point",
                    path="/IndexCache",
                    file_system=commvault_file_system).access_point_id)))
    ecs_task.default_container.add_mount_points(
        ecs.MountPoint(container_path="/opt/IndexCache",
                       source_volume="IndexCache",
                       read_only=False))

    # -v $TMPDIR/jobResults/:/opt/jobResults
    ecs_task.add_volume(
        name="jobResults",
        efs_volume_configuration=ecs.EfsVolumeConfiguration(
            file_system_id=commvault_file_system.file_system_id,
            transit_encryption="ENABLED",
            authorization_config=ecs.AuthorizationConfig(
                # iam="ENABLED",
                access_point_id=efs.AccessPoint(
                    self,
                    "jobResults-access-point",
                    path="/jobResults",
                    file_system=commvault_file_system).access_point_id)))
    ecs_task.default_container.add_mount_points(
        ecs.MountPoint(container_path="/opt/jobResults",
                       source_volume="jobResults",
                       read_only=False))

    # -v $TMPDIR/certificates:/opt/commvault/Base/certificates
    ecs_task.add_volume(
        name="certificates",
        efs_volume_configuration=ecs.EfsVolumeConfiguration(
            file_system_id=commvault_file_system.file_system_id,
            transit_encryption="ENABLED",
            authorization_config=ecs.AuthorizationConfig(
                # iam="ENABLED",
                access_point_id=efs.AccessPoint(
                    self,
                    "certificates-access-point",
                    path="/certificates",
                    file_system=commvault_file_system).access_point_id)))
    ecs_task.default_container.add_mount_points(
        ecs.MountPoint(container_path="/opt/commvault/Base/certificates",
                       source_volume="certificates",
                       read_only=False))
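# The six add_volume / add_mount_points blocks above are nearly identical. A
# hedged refactoring sketch (not in the original source) that factors the
# pattern into a helper; the name mount_efs_path and its parameter list are
# hypothetical:
def mount_efs_path(scope, task, file_system, name, container_path):
    """Create an EFS access point at /<name> and mount it at container_path."""
    access_point = efs.AccessPoint(scope,
                                   name + "-access-point",
                                   path="/" + name,
                                   file_system=file_system)
    task.add_volume(
        name=name,
        efs_volume_configuration=ecs.EfsVolumeConfiguration(
            file_system_id=file_system.file_system_id,
            transit_encryption="ENABLED",
            authorization_config=ecs.AuthorizationConfig(
                access_point_id=access_point.access_point_id)))
    task.default_container.add_mount_points(
        ecs.MountPoint(container_path=container_path,
                       source_volume=name,
                       read_only=False))

# e.g. mount_efs_path(self, ecs_task, commvault_file_system,
#                     "IndexCache", "/opt/IndexCache")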
def __init__(self, scope: core.Construct, construct_id: str, props, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_efs/FileSystem.html#aws_cdk.aws_efs.FileSystem.add_access_point
    # Access points allow multiple WordPress file systems to live on the same
    # EFS volume; the more data on an EFS volume, the better it will perform.
    # This provides a high level of security while also optimizing performance.
    AccessPoint = props['file_system'].add_access_point(
        "local-access-point",
        path=f"/{props['IdentifierName']}",
        create_acl=efs.Acl(
            owner_uid="100",  # https://aws.amazon.com/blogs/containers/developers-guide-to-using-amazon-efs-with-amazon-ecs-and-aws-fargate-part-2/
            owner_gid="101",
            permissions="0755"))

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/Cluster.html?highlight=ecs%20cluster#aws_cdk.aws_ecs.Cluster
    cluster = ecs.Cluster(
        self,
        "Cluster",
        vpc=props['vpc'],
        container_insights=props['ecs_enable_container_insights'])

    # Get needed secrets
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ssm/StringParameter.html?highlight=from_secure_string_parameter_attributes#aws_cdk.aws_ssm.StringParameter.from_secure_string_parameter_attributes
    # ParameterStoreTest = ssm.StringParameter.from_secure_string_parameter_attributes(
    #     self, "ParameterStoreTest",
    #     parameter_name="",  # Remember: KMS permissions for the task execution role for the Parameter Store key!
    #     version=1
    # )

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/Secret.html
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_secretsmanager/SecretStringGenerator.html
    dbtest = {
        "database_name": '',
        "username": '',
        "host": str(props["rds_instance"].cluster_endpoint.hostname)
    }
    WordpressDbConnectionSecret = secretsmanager.Secret(
        self,
        "WordpressDbConnectionSecret",
        generate_secret_string=secretsmanager.SecretStringGenerator(
            secret_string_template=json.dumps(dbtest),
            generate_string_key="password",
            exclude_characters='/"'))

    # ToDo: Lambda call to populate secrets but only

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/Volume.html#aws_cdk.aws_ecs.Volume
    WordpressEfsVolume = ecs.Volume(
        name="efs",
        efs_volume_configuration=ecs.EfsVolumeConfiguration(
            file_system_id=props['file_system'].file_system_id,
            transit_encryption="ENABLED",
            authorization_config=ecs.AuthorizationConfig(
                access_point_id=AccessPoint.access_point_id)))

    # Create the task definition
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateTaskDefinition.html
    WordpressTask = ecs.FargateTaskDefinition(
        self,
        "TaskDefinition",
        cpu=props['ecs_cpu_size'],
        memory_limit_mib=props['ecs_memory_size'],
        volumes=[WordpressEfsVolume])

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateTaskDefinition.html#aws_cdk.aws_ecs.FargateTaskDefinition.add_container
    WordpressContainer = WordpressTask.add_container(
        "Wordpress",
        image=ecs.ContainerImage.from_ecr_repository(
            repository=ecr.Repository.from_repository_name(
                self,
                "wpimage",
                repository_name=props['ecs_container_repo_name']),
            tag=props['ecs_container_tag']),
        logging=ecs.LogDriver.aws_logs(
            stream_prefix="container",
            # log_group=f"{props['environment']}/{props['unit']}/{props['application']}",  # ToDo: make sure I like the log group name
            log_retention=logs.RetentionDays(
                props['ecs_log_retention_period'])),
        environment={
            "TROUBLESHOOTING_MODE_ENABLED":
            props['TROUBLESHOOTING_MODE_ENABLED']
        },
        secrets={
            # "PARAMETERSTORETEST": ecs.Secret.from_ssm_parameter(ParameterStoreTest),
            "DBHOST":
            ecs.Secret.from_secrets_manager(WordpressDbConnectionSecret,
                                            "host"),
            "DBUSER":
            ecs.Secret.from_secrets_manager(WordpressDbConnectionSecret,
                                            "username"),
            "DBUSERPASS":
            ecs.Secret.from_secrets_manager(WordpressDbConnectionSecret,
                                            "password"),
            "DBNAME":
            ecs.Secret.from_secrets_manager(WordpressDbConnectionSecret,
                                            "database_name")
        },
    )

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/ContainerDefinition.html?highlight=add_port_mappings#aws_cdk.aws_ecs.ContainerDefinition.add_port_mappings
    WordpressContainer.add_port_mappings(
        ecs.PortMapping(container_port=80, protocol=ecs.Protocol.TCP))

    # https://gist.github.com/phillippbertram/ee312b09c3982d76b9799653ed6d6201
    WordpressContainer.add_mount_points(
        ecs.MountPoint(container_path=props['ecs_container_efs_path'],
                       read_only=False,
                       source_volume=WordpressEfsVolume.name))

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs_patterns/ApplicationLoadBalancedFargateService.html
    EcsService = ecs_patterns.ApplicationLoadBalancedFargateService(
        self,
        "EcsService",
        cluster=cluster,
        desired_count=props['ecs_container_desired_count'],
        task_definition=WordpressTask,
        enable_ecs_managed_tags=True,
        public_load_balancer=True,
        domain_name=props['domain_name'],
        domain_zone=route53.HostedZone.from_hosted_zone_attributes(
            self,
            "hostedZone",
            hosted_zone_id=props['domain_zone'],
            zone_name=props['zone_name']),
        listener_port=443,
        redirect_http=True,
        protocol=elasticloadbalancingv2.ApplicationProtocol("HTTPS"),
        target_protocol=elasticloadbalancingv2.ApplicationProtocol("HTTP"),
        platform_version=ecs.FargatePlatformVersion("VERSION1_4"),  # Required for EFS
        security_groups=[
            ec2.SecurityGroup.from_security_group_id(
                self,
                "EcsToRdsSeurityGroup",
                security_group_id=props["EcsToRdsSeurityGroup"].security_group_id)
        ],
    )

    # https://gist.github.com/phillippbertram/ee312b09c3982d76b9799653ed6d6201
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/Connections.html#aws_cdk.aws_ec2.Connections
    EcsService.service.connections.allow_to(
        props['file_system'], ec2.Port.tcp(2049))  # Open hole to ECS in EFS SG

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_elasticloadbalancingv2/ApplicationTargetGroup.html#aws_cdk.aws_elasticloadbalancingv2.ApplicationTargetGroup.set_attribute
    EcsService.target_group.set_attribute(
        key="load_balancing.algorithm.type",
        value="least_outstanding_requests")
    EcsService.target_group.set_attribute(
        key="deregistration_delay.timeout_seconds", value="30")
    EcsService.target_group.configure_health_check(
        healthy_threshold_count=5,  # 2-10
        timeout=core.Duration.seconds(29),
    )

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateService.html#aws_cdk.aws_ecs.FargateService.auto_scale_task_count
    ECSAutoScaler = EcsService.service.auto_scale_task_count(
        max_capacity=props['ecs_container_max_count'],
        min_capacity=props['ecs_container_min_count'])

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/ScalableTaskCount.html#aws_cdk.aws_ecs.ScalableTaskCount
    ECSAutoScaler.scale_on_cpu_utilization(
        "cpuScale",
        target_utilization_percent=80,
        scale_out_cooldown=core.Duration.seconds(30),
        scale_in_cooldown=core.Duration.seconds(60))
    ECSAutoScaler.scale_on_memory_utilization(
        "memScale",
        target_utilization_percent=80,
        scale_out_cooldown=core.Duration.seconds(30),
        scale_in_cooldown=core.Duration.seconds(60))
def __init__(self, scope: core.Construct, construct_id: str, *, secrets: List[Secret]):
    super().__init__(scope, construct_id)

    vpc = aws_ec2.Vpc(
        self,
        "Vpc",
        enable_dns_support=True,
        enable_dns_hostnames=True,
        max_azs=3,
        nat_gateways=0,
        subnet_configuration=[
            aws_ec2.SubnetConfiguration(
                name="Public", subnet_type=aws_ec2.SubnetType.PUBLIC)
        ],
    )

    postgres_volume_name = "duckbot_dbdata"
    file_system = aws_efs.FileSystem(
        self,
        "PostgresFileSystem",
        vpc=vpc,
        encrypted=True,
        file_system_name=postgres_volume_name,
        removal_policy=core.RemovalPolicy.DESTROY)
    file_system.node.default_child.override_logical_id(
        "FileSystem")  # rename for compatibility with legacy CloudFormation template

    task_definition = aws_ecs.TaskDefinition(
        self,
        "TaskDefinition",
        compatibility=aws_ecs.Compatibility.EC2,
        family="duckbot",
        memory_mib="960",
        network_mode=aws_ecs.NetworkMode.BRIDGE)

    postgres_data_path = "/data/postgres"
    postgres = task_definition.add_container(
        "postgres",
        container_name="postgres",
        image=aws_ecs.ContainerImage.from_registry("postgres:13.2"),
        essential=False,
        environment={
            "POSTGRES_USER": "******",
            "POSTGRES_PASSWORD": "******",
            "PGDATA": postgres_data_path,
        },
        health_check=aws_ecs.HealthCheck(
            command=["CMD", "pg_isready", "-U", "duckbot"],
            interval=core.Duration.seconds(30),
            timeout=core.Duration.seconds(5),
            retries=3,
            start_period=core.Duration.seconds(30),
        ),
        logging=aws_ecs.LogDriver.aws_logs(
            stream_prefix="ecs",
            log_retention=aws_logs.RetentionDays.ONE_MONTH),
        memory_reservation_mib=128,
    )

    task_definition.add_volume(
        name=postgres_volume_name,
        efs_volume_configuration=aws_ecs.EfsVolumeConfiguration(
            file_system_id=file_system.file_system_id, root_directory="/"))
    postgres.add_mount_points(
        aws_ecs.MountPoint(source_volume=postgres_volume_name,
                           container_path=postgres_data_path,
                           read_only=False))

    secrets_as_parameters = {
        # note: a parameter version is required by the CDK, but does not make
        # it into the template; specify version 1 for simplicity
        x.environment_name:
        aws_ssm.StringParameter.from_secure_string_parameter_attributes(
            self,
            x.environment_name,
            parameter_name=x.parameter_name,
            version=1)
        for x in secrets
    }

    duckbot = task_definition.add_container(
        "duckbot",
        container_name="duckbot",
        essential=True,
        image=aws_ecs.ContainerImage.from_registry(
            self.node.try_get_context("duckbot_image")),
        environment={"STAGE": "prod"},
        secrets={
            k: aws_ecs.Secret.from_ssm_parameter(v)
            for k, v in secrets_as_parameters.items()
        },
        health_check=aws_ecs.HealthCheck(
            command=["CMD", "python", "-m", "duckbot.health"],
            interval=core.Duration.seconds(30),
            timeout=core.Duration.seconds(10),
            retries=3,
            start_period=core.Duration.seconds(30),
        ),
        logging=aws_ecs.LogDriver.aws_logs(
            stream_prefix="ecs",
            log_retention=aws_logs.RetentionDays.ONE_MONTH),
        memory_reservation_mib=128,
    )
    duckbot.add_link(postgres)

    asg = aws_autoscaling.AutoScalingGroup(
        self,
        "AutoScalingGroup",
        min_capacity=0,
        max_capacity=1,
        desired_capacity=1,
        machine_image=aws_ecs.EcsOptimizedImage.amazon_linux2(),
        instance_type=aws_ec2.InstanceType("t2.micro"),
        key_name="duckbot",  # needs to be created manually
        instance_monitoring=aws_autoscaling.Monitoring.BASIC,
        vpc=vpc,
    )
    asg.connections.allow_to_default_port(file_system)
    asg.connections.allow_from(aws_ec2.Peer.any_ipv4(), aws_ec2.Port.tcp(22))
    asg.connections.allow_from(aws_ec2.Peer.any_ipv4(), aws_ec2.Port.tcp(80))
    asg.connections.allow_from(aws_ec2.Peer.any_ipv4(), aws_ec2.Port.tcp(443))

    cluster = aws_ecs.Cluster(self, "Cluster", cluster_name="duckbot", vpc=vpc)
    cluster.add_asg_capacity_provider(
        aws_ecs.AsgCapacityProvider(cluster,
                                    "AsgCapacityProvider",
                                    auto_scaling_group=asg),
        can_containers_access_instance_role=True)

    aws_ecs.Ec2Service(
        self,
        "Service",
        service_name="duckbot",
        cluster=cluster,
        task_definition=task_definition,
        desired_count=1,
        min_healthy_percent=0,
        max_healthy_percent=100,
    )
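# The constructor above takes secrets: List[Secret], but the Secret type is
# not shown in this snippet. A minimal sketch of what it presumably looks
# like, inferred from how x.environment_name and x.parameter_name are used
# (the field names are the only grounded parts; the rest is an assumption):
from dataclasses import dataclass

@dataclass
class Secret:
    environment_name: str  # env var name injected into the duckbot container
    parameter_name: str    # SSM SecureString parameter holding the value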
def __init__(self, scope: core.Construct, construct_id: str,
             properties: WordpressStackProperties, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    database = rds.ServerlessCluster(
        self,
        "WordpressServerless",
        engine=rds.DatabaseClusterEngine.AURORA_MYSQL,
        default_database_name="WordpressDatabase",
        vpc=properties.vpc,
        scaling=rds.ServerlessScalingOptions(
            auto_pause=core.Duration.seconds(0)),
        deletion_protection=False,
        backup_retention=core.Duration.days(7),
        removal_policy=core.RemovalPolicy.DESTROY,
    )

    file_system = efs.FileSystem(
        self,
        "WebRoot",
        vpc=properties.vpc,
        performance_mode=efs.PerformanceMode.GENERAL_PURPOSE,
        throughput_mode=efs.ThroughputMode.BURSTING,
    )

    # docker context directory
    docker_context_path = os.path.join(os.path.dirname(__file__), "..", "..", "src")

    # upload images to ECR
    nginx_image = ecr_assets.DockerImageAsset(
        self,
        "Nginx",
        directory=docker_context_path,
        file="Docker.nginx",
    )

    wordpress_image = ecr_assets.DockerImageAsset(
        self,
        "Php",
        directory=docker_context_path,
        file="Docker.wordpress",
    )

    cluster = ecs.Cluster(self, 'ComputeResourceProvider', vpc=properties.vpc)

    wordpress_volume = ecs.Volume(
        name="WebRoot",
        efs_volume_configuration=ecs.EfsVolumeConfiguration(
            file_system_id=file_system.file_system_id))

    event_task = ecs.FargateTaskDefinition(self,
                                           "WordpressTask",
                                           volumes=[wordpress_volume])

    #
    # webserver
    #
    nginx_container = event_task.add_container(
        "Nginx",
        image=ecs.ContainerImage.from_docker_image_asset(nginx_image))
    nginx_container.add_port_mappings(ecs.PortMapping(container_port=80))

    nginx_container_volume_mount_point = ecs.MountPoint(
        read_only=True,
        container_path="/var/www/html",
        source_volume=wordpress_volume.name)
    nginx_container.add_mount_points(nginx_container_volume_mount_point)

    #
    # application server
    #
    app_container = event_task.add_container(
        "Php",
        environment={
            'WORDPRESS_DB_HOST': database.cluster_endpoint.hostname,
            'WORDPRESS_TABLE_PREFIX': 'wp_'
        },
        secrets={
            'WORDPRESS_DB_USER':
            ecs.Secret.from_secrets_manager(database.secret,
                                            field="username"),
            'WORDPRESS_DB_PASSWORD':
            ecs.Secret.from_secrets_manager(database.secret,
                                            field="password"),
            'WORDPRESS_DB_NAME':
            ecs.Secret.from_secrets_manager(database.secret,
                                            field="dbname"),
        },
        image=ecs.ContainerImage.from_docker_image_asset(wordpress_image))
    app_container.add_port_mappings(ecs.PortMapping(container_port=9000))

    container_volume_mount_point = ecs.MountPoint(
        read_only=False,
        container_path="/var/www/html",
        source_volume=wordpress_volume.name)
    app_container.add_mount_points(container_volume_mount_point)

    #
    # create the service
    #
    wordpress_service = ecs.FargateService(
        self,
        "InternalService",
        task_definition=event_task,
        platform_version=ecs.FargatePlatformVersion.VERSION1_4,
        cluster=cluster,
    )

    #
    # scaling
    #
    scaling = wordpress_service.auto_scale_task_count(min_capacity=2,
                                                      max_capacity=50)
    scaling.scale_on_cpu_utilization(
        "CpuScaling",
        target_utilization_percent=85,
        scale_in_cooldown=core.Duration.seconds(120),
        scale_out_cooldown=core.Duration.seconds(30),
    )

    #
    # network acl
    #
    database.connections.allow_default_port_from(wordpress_service,
                                                 "wordpress access to db")
    file_system.connections.allow_default_port_from(wordpress_service)

    #
    # external access
    #
    wordpress_service.connections.allow_from(
        other=properties.load_balancer, port_range=ec2.Port.tcp(80))

    http_listener = properties.load_balancer.add_listener(
        "HttpListener",
        port=80,
    )

    http_listener.add_targets(
        "HttpServiceTarget",
        protocol=elbv2.ApplicationProtocol.HTTP,
        targets=[wordpress_service],
        health_check=elbv2.HealthCheck(healthy_http_codes="200,301,302"))
def __init__(self, scope: core.Construct, id: str, datalake: DataLakeLayer, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)
    self.__datalake = datalake

    self.security_group = ec2.SecurityGroup(
        self,
        'SecurityGroup',
        vpc=self.datalake.vpc,
        allow_all_outbound=True,
        description='SonarQube Security Group')
    self.security_group.add_ingress_rule(peer=ec2.Peer.any_ipv4(),
                                         connection=ec2.Port.all_traffic(),
                                         description='Allow any traffic')

    self.sonarqube_svr_ecr = ecr.DockerImageAsset(
        self,
        'Repo',
        directory=os.path.join(root_dir, 'images/sonarqube-server'),
        repository_name='sonarqube')

    self.sonarqube_cli_ecr = ecr.DockerImageAsset(
        self,
        'Cli',
        directory=os.path.join(root_dir, 'images/sonarqube-scanner'),
        repository_name='sonarqube-cli')

    self.database = rds.DatabaseCluster(
        self,
        'Database',
        engine=rds.DatabaseClusterEngine.aurora_postgres(
            version=rds.AuroraPostgresEngineVersion.VER_11_9),
        default_database_name='sonarqube',
        removal_policy=core.RemovalPolicy.DESTROY,
        credentials=rds.Credentials.from_username(
            username='******',
            password=core.SecretValue(value='postgres')),
        instance_props=rds.InstanceProps(
            vpc=self.datalake.vpc,
            security_groups=[self.security_group],
            instance_type=ec2.InstanceType('r6g.xlarge')))

    # self.ecs_cluster = ecs.Cluster(self, 'SonarCluster',
    #     container_insights=True,
    #     vpc=self.datalake.vpc,
    #     capacity=ecs.AddCapacityOptions(
    #         machine_image_type=ecs.MachineImageType.AMAZON_LINUX_2,
    #         instance_type=ec2.InstanceType('m5.xlarge'),
    #         allow_all_outbound=True,
    #         associate_public_ip_address=False,
    #         vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
    #         desired_capacity=2))
    #
    # self.service = ecsp.ApplicationLoadBalancedEc2Service(self, 'SonarEc2',
    #     cluster=self.ecs_cluster,
    #     desired_count=1,
    #     listener_port=80,
    #     memory_reservation_mib=4 * 1024,
    #     task_image_options=ecsp.ApplicationLoadBalancedTaskImageOptions(
    #         image=ecs.ContainerImage.from_docker_image_asset(asset=self.sonarqube_svr_ecr),
    #         container_name='sonarqube-svr',
    #         container_port=9000,
    #         enable_logging=True,
    #         environment={
    #             '_SONAR_JDBC_URL': 'jdbc:postgresql://{}/sonarqube'.format(
    #                 self.database.cluster_endpoint.hostname),
    #             '_SONAR_JDBC_USERNAME': '******',
    #             '_SONAR_JDBC_PASSWORD': '******'
    #         }))

    self.service = ecsp.ApplicationLoadBalancedFargateService(
        self,
        'Server',
        assign_public_ip=True,
        vpc=self.datalake.vpc,
        desired_count=1,
        cpu=4096,
        memory_limit_mib=8 * 1024,
        listener_port=80,
        platform_version=ecs.FargatePlatformVersion.VERSION1_4,
        security_groups=[self.security_group, self.datalake.efs_sg],
        task_image_options=ecsp.ApplicationLoadBalancedTaskImageOptions(
            image=ecs.ContainerImage.from_docker_image_asset(
                asset=self.sonarqube_svr_ecr),
            container_name='sonarqube-svr',
            container_port=9000,
            enable_logging=True,
            environment={
                '_SONAR_JDBC_URL':
                'jdbc:postgresql://{}/sonarqube'.format(
                    self.database.cluster_endpoint.hostname),
                '_SONAR_JDBC_USERNAME': '******',
                '_SONAR_JDBC_PASSWORD': '******'
            }))

    for name in ['AmazonElasticFileSystemClientFullAccess']:
        self.service.task_definition.task_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(name))

    # Override container-specific settings
    container = self.service.task_definition.default_container

    # Required to start remote sql
    container.add_ulimits(
        ecs.Ulimit(name=ecs.UlimitName.NOFILE,
                   soft_limit=262145,
                   hard_limit=262145))

    for folder in ['data', 'logs']:
        efs_ap = self.datalake.efs.add_access_point(
            'sonarqube-' + folder,
            create_acl=efs.Acl(owner_gid="0",
                               owner_uid="0",
                               permissions="777"),
            path='/sonarqube/' + folder)

        self.service.task_definition.add_volume(
            name=folder,
            efs_volume_configuration=ecs.EfsVolumeConfiguration(
                file_system_id=self.datalake.efs.file_system_id,
                transit_encryption='ENABLED',
                authorization_config=ecs.AuthorizationConfig(
                    access_point_id=efs_ap.access_point_id,
                    iam='DISABLED')))
        container.add_mount_points(
            ecs.MountPoint(container_path='/opt/sonarqube/' + folder,
                           source_volume=folder,
                           read_only=False))
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    volume_name = 'factorio'

    self.vpc = ec2.Vpc(self, "vpc", max_azs=1, nat_gateways=0)

    self.efs_fs = efs.FileSystem(self,
                                 'Filesystem',
                                 vpc=self.vpc,
                                 enable_automatic_backups=True)

    self.ecs = ecs.Cluster(self, "Fargate", vpc=self.vpc)

    self.task_definition = ecs.FargateTaskDefinition(
        self,
        "Factorio",
        cpu=2048,
        memory_limit_mib=4096,
        volumes=[
            ecs.Volume(name=volume_name,
                       efs_volume_configuration=ecs.EfsVolumeConfiguration(
                           file_system_id=self.efs_fs.file_system_id))
        ])

    self.container = self.task_definition.add_container(
        "hello-world",
        image=ecs.ContainerImage.from_registry(
            name="factoriotools/factorio:stable"))

    self.container.add_mount_points(
        ecs.MountPoint(container_path="/factorio",
                       read_only=False,
                       source_volume=volume_name))

    udp_34197_mapping = ecs.PortMapping(container_port=34197,
                                        host_port=34197,
                                        protocol=ecs.Protocol.UDP)
    tcp_27015_mapping = ecs.PortMapping(container_port=27015,
                                        host_port=27015,
                                        protocol=ecs.Protocol.TCP)
    self.container.add_port_mappings(udp_34197_mapping, tcp_27015_mapping)

    core.CfnOutput(self, "VPC", value=self.vpc.vpc_id)
    core.CfnOutput(self, "EFS", value=self.efs_fs.file_system_id)
    core.CfnOutput(self, "TaskDef", value=self.task_definition.task_definition_arn)
    core.CfnOutput(self, "Container", value=self.container.container_name)
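# The stack above defines a cluster and a task definition but never launches a
# service, so nothing runs until a task is started by hand. A minimal sketch
# of the missing piece, assuming it is added inside the same __init__ (the id
# "Service" and the settings below are assumptions, not from the original):
service = ecs.FargateService(
    self,
    "Service",
    cluster=self.ecs,
    task_definition=self.task_definition,
    desired_count=1,
    assign_public_ip=True,  # the VPC has no NAT gateways, so tasks need public IPs
    platform_version=ecs.FargatePlatformVersion.VERSION1_4)  # required for EFS
# Allow tasks to reach the EFS mount targets on the NFS port.
service.connections.allow_to(self.efs_fs, ec2.Port.tcp(2049))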
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # Get config value for alert email
    email = self.node.try_get_context("email")
    if email == 'changeme@localhost':
        exit(
            'ERROR: Change the email in cdk.json or pass it with -c email=changeme@localhost'
        )

    # Create SNS topic for alarms to be sent to
    alarm_topic = sns.Topic(self, "backup_alarm", display_name="backup_alarm")
    # Subscribe my email so the alarms go to me
    alarm_topic.add_subscription(subscriptions.EmailSubscription(email))

    # Create VPC to run everything in. We make this public just because we
    # don't want to spend $30/mo on a NAT gateway.
    vpc = ec2.Vpc(
        self,
        "VPC",
        nat_gateways=0,
        subnet_configuration=[
            ec2.SubnetConfiguration(name="public",
                                    subnet_type=ec2.SubnetType.PUBLIC)
        ],
    )

    ecs_sg = ec2.SecurityGroup(self, "ecs_sg", vpc=vpc)
    efs_sg = ec2.SecurityGroup(self, "efs_sg", vpc=vpc)
    efs_sg.add_ingress_rule(
        peer=ecs_sg,
        connection=ec2.Port.tcp(2049),
        description="Allow backup runner access",
    )
    # Open this to the VPC
    efs_sg.add_ingress_rule(
        peer=ec2.Peer.ipv4("10.0.0.0/8"),
        connection=ec2.Port.tcp(2049),
        description="Allow backup runner access",
    )

    # Define the EFS
    fileSystem = efs.FileSystem(
        self,
        "MyEfsFileSystem",
        vpc=vpc,
        encrypted=True,
        lifecycle_policy=efs.LifecyclePolicy.AFTER_7_DAYS,
        performance_mode=efs.PerformanceMode.GENERAL_PURPOSE,
        throughput_mode=efs.ThroughputMode.BURSTING,
        security_group=efs_sg,
    )

    # Define the ECS task
    cluster = ecs.Cluster(self, "Cluster", vpc=vpc)
    taskDefinition = ecs.FargateTaskDefinition(
        self,
        "taskDefinition",
        volumes=[
            ecs.Volume(
                name="efsvolume",
                efs_volume_configuration=ecs.EfsVolumeConfiguration(
                    file_system_id=fileSystem.file_system_id,
                    root_directory="/",
                    transit_encryption="ENABLED",
                ),
            )
        ],
        memory_limit_mib=8192,
        cpu=2048,
    )

    log_driver = ecs.AwsLogDriver(
        stream_prefix="backup_runner",
        log_retention=logs.RetentionDays.TWO_WEEKS,
    )

    taskDefinition.add_container(
        "backup-runner",
        image=ecs.ContainerImage.from_asset("./resources/backup_runner"),
        memory_limit_mib=8192,
        cpu=2048,
        logging=log_driver,
    )

    # The previous method to add the container doesn't let us specify the
    # mount point for the EFS, so we have to do it here, referencing the
    # container that was just added.
    taskDefinition.default_container.add_mount_points(
        ecs.MountPoint(container_path="/mnt/efs",
                       read_only=False,
                       source_volume="efsvolume"))

    # Create rule to trigger this task every 24 hours
    events.Rule(
        self,
        "scheduled_run",
        rule_name="backup_runner",
        # Run at midnight UTC every night
        schedule=events.Schedule.expression("cron(0 0 * * ? *)"),
        description="Starts the backup runner task every night",
        targets=[
            targets.EcsTask(
                cluster=cluster,
                task_definition=taskDefinition,
                subnet_selection=ec2.SubnetSelection(
                    subnet_type=ec2.SubnetType.PUBLIC),
                # Platform version 1.4 is required to use EFS;
                # "LATEST" does not yet support EFS.
                platform_version=ecs.FargatePlatformVersion.VERSION1_4,
                security_groups=[ecs_sg],
            )
        ],
    )

    # Create notification topic for backups
    backup_topic = sns.Topic(self,
                             "backup_topic",
                             display_name="Backup status")

    # Create AWS Backup
    vault = backup.BackupVault(
        self,
        "Vault",
        access_policy=iam.PolicyDocument(statements=[
            iam.PolicyStatement(
                effect=iam.Effect.DENY,
                actions=[
                    "backup:DeleteBackupVault",
                    "backup:DeleteRecoveryPoint",
                    "backup:UpdateRecoveryPointLifecycle",
                    # "backup:PutBackupVaultAccessPolicy",  # This results in "Failed putting policy for Backup vault backuprunnerVaultXXX as it will lock down from further policy changes"
                    "backup:DeleteBackupVaultAccessPolicy",
                    "backup:DeleteBackupVaultNotifications",
                    # "backup:PutBackupVaultNotifications",  # This causes other parts of this app to fail.
                ],
                resources=["*"],
                principals=[iam.AnyPrincipal()],
            )
        ]),
        notification_topic=alarm_topic,
        notification_events=[
            # Monitor for some failures or access to the backups
            backup.BackupVaultEvents.BACKUP_JOB_EXPIRED,
            backup.BackupVaultEvents.BACKUP_JOB_FAILED,
            backup.BackupVaultEvents.COPY_JOB_FAILED,
            backup.BackupVaultEvents.COPY_JOB_STARTED,
            backup.BackupVaultEvents.RESTORE_JOB_COMPLETED,
            backup.BackupVaultEvents.RESTORE_JOB_FAILED,
            backup.BackupVaultEvents.RESTORE_JOB_STARTED,
            backup.BackupVaultEvents.RESTORE_JOB_SUCCESSFUL,
        ],
    )

    plan = backup.BackupPlan.daily35_day_retention(self, "backup")
    plan.add_selection(
        "Selection",
        resources=[backup.BackupResource.from_efs_file_system(fileSystem)],
    )

    #
    # Create metric filter for errors in the CloudWatch Logs from the ECS
    #
    METRIC_NAME = "log_errors"
    METRIC_NAMESPACE = "backup_runner"

    metric = cloudwatch.Metric(namespace=METRIC_NAMESPACE,
                               metric_name=METRIC_NAME)

    error_metric = logs.MetricFilter(
        self,
        "MetricFilterId",
        metric_name=METRIC_NAME,
        metric_namespace=METRIC_NAMESPACE,
        log_group=log_driver.log_group,
        filter_pattern=logs.FilterPattern.any_term("ERROR"),
        metric_value="1",
    )

    error_alarm = cloudwatch.Alarm(
        self,
        "AlarmId",
        metric=metric,
        evaluation_periods=1,
        actions_enabled=True,
        alarm_name="backup_runner_alarm",
        alarm_description="Errors in backup runner",
        comparison_operator=cloudwatch.ComparisonOperator.
        GREATER_THAN_OR_EQUAL_TO_THRESHOLD,
        treat_missing_data=cloudwatch.TreatMissingData.NOT_BREACHING,
        period=core.Duration.hours(1),
        threshold=1,
        statistic="sum",
    )

    # Connect the alarm to the SNS topic
    error_alarm.add_alarm_action(cloudwatch_actions.SnsAction(alarm_topic))

    # The above doesn't give it privileges, so add them to the alarm topic's
    # resource policy.
    alarm_topic.add_to_resource_policy(
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=["sns:Publish"],
            resources=[alarm_topic.topic_arn],
            principals=[iam.ServicePrincipal("cloudwatch.amazonaws.com")],
        ))
def __init__(self, scope: core.Construct, id: str, custom_vpc, efs_share,
             efs_ap_nginx, enable_container_insights: bool = False,
             **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # Create a security group to allow Fargate cluster instances to access EFS.
    web_svc_sg = _ec2.SecurityGroup(
        self,
        id="webSvcSecurityGroup",
        vpc=custom_vpc,
        security_group_name=f"web_svc_sg_{id}",
        description="Security Group to allow Fargate Cluster instances to access EFS")

    # Allow Internet access to the Fargate web service
    web_svc_sg.add_ingress_rule(
        _ec2.Peer.any_ipv4(),
        _ec2.Port.tcp(80),
        description="Allow Internet access to web service")

    # The code that defines your stack goes here
    fargate_cluster = _ecs.Cluster(
        self,
        "fargateClusterId",
        cluster_name=f"web-app-{id}",
        # container_insights=enable_container_insights,
        vpc=custom_vpc)

    web_app_task_def = _ecs.FargateTaskDefinition(
        self,
        "webAppTaskDef",
        cpu=256,
        memory_limit_mib=512,
    )

    # Add EFS volume to the task definition
    web_app_task_def.add_volume(
        name="html",
        efs_volume_configuration=_ecs.EfsVolumeConfiguration(
            file_system_id=efs_share.file_system_id,
            transit_encryption="ENABLED",
            authorization_config=_ecs.AuthorizationConfig(
                access_point_id=efs_ap_nginx.access_point_id)))

    web_app_container = web_app_task_def.add_container(
        "webAppContainer",
        cpu=256,
        memory_limit_mib=512,
        environment={
            "github": "https://github.com/miztiik",
            "ko_fi": "https://ko-fi.com/miztiik"
        },
        image=_ecs.ContainerImage.from_registry("nginx:latest"),
        logging=_ecs.LogDrivers.aws_logs(
            stream_prefix="mystique-automation-logs",
            log_retention=_logs.RetentionDays.ONE_DAY))

    web_app_container.add_ulimits(
        _ecs.Ulimit(name=_ecs.UlimitName.NOFILE,
                    soft_limit=65536,
                    hard_limit=65536))

    web_app_container.add_port_mappings(
        _ecs.PortMapping(container_port=80, protocol=_ecs.Protocol.TCP))
    web_app_container.add_port_mappings(
        _ecs.PortMapping(container_port=443, protocol=_ecs.Protocol.TCP))

    # Mount the EFS volume in the web server container
    web_app_container.add_mount_points(
        _ecs.MountPoint(container_path="/usr/share/nginx/html",
                        read_only=False,
                        source_volume="html"))

    # Launch the service and attach a load balancer using a CDK pattern
    web_app_service = _ecs_patterns.ApplicationLoadBalancedFargateService(
        self,
        "webSrv",
        platform_version=_ecs.FargatePlatformVersion.VERSION1_4,
        cluster=fargate_cluster,
        task_definition=web_app_task_def,
        assign_public_ip=False,
        public_load_balancer=True,
        listener_port=80,
        desired_count=1,
        # enable_ecs_managed_tags=True,
        health_check_grace_period=core.Duration.seconds(60),
        # cpu=1024,
        # memory_limit_mib=2048,
        # service_name="chatAppService",
    )

    # Outputs
    output_0 = core.CfnOutput(
        self,
        "AutomationFrom",
        value=f"{GlobalArgs.SOURCE_INFO}",
        description="To know more about this automation stack, check out our github page.")
    output_1 = core.CfnOutput(
        self,
        "ClusterNameOutput",
        value=f"{fargate_cluster.cluster_name}",
        description="To know more about this automation stack, check out our github page.")
    output_2 = core.CfnOutput(
        self,
        "webAppServiceUrl",
        value=f"http://{web_app_service.load_balancer.load_balancer_dns_name}",
        description="Use a utility like curl or a browser to access the web server.")
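# The constructor above receives efs_share and efs_ap_nginx ready-made from an
# upstream stack that is not shown here. A hedged sketch of what that stack
# presumably creates; the "_efs" alias, construct ids, "/html" path, and POSIX
# ids are assumptions, not from the original:
efs_share = _efs.FileSystem(self, "webEfsShare", vpc=custom_vpc, encrypted=True)
efs_ap_nginx = efs_share.add_access_point(
    "nginxAccessPoint",
    path="/html",
    create_acl=_efs.Acl(owner_uid="1000", owner_gid="1000", permissions="0755"))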
def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/Vpc.html
    vpc = ec2.Vpc(self,
                  "vpc",
                  cidr=props['vpc_CIDR'],
                  max_azs=3,
                  subnet_configuration=[{
                      'cidrMask': 28,
                      'name': 'public',
                      'subnetType': ec2.SubnetType.PUBLIC
                  }, {
                      'cidrMask': 28,
                      'name': 'private',
                      'subnetType': ec2.SubnetType.PRIVATE
                  }, {
                      'cidrMask': 28,
                      'name': 'db',
                      'subnetType': ec2.SubnetType.ISOLATED
                  }])

    rds_subnetGroup = rds.SubnetGroup(
        self,
        "rds_subnetGroup",
        description=
        f"Group for {props['environment']}-{props['application']}-{props['unit']} DB",
        vpc=vpc,
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.ISOLATED))

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_rds/DatabaseCluster.html
    ## TODO: Add an Aurora Serverless option
    rds_instance = rds.DatabaseCluster(
        self,
        'wordpress-db',
        engine=rds.DatabaseClusterEngine.aurora_mysql(
            version=rds.AuroraMysqlEngineVersion.VER_2_07_2),
        instances=1,
        instance_props=rds.InstanceProps(
            vpc=vpc,
            enable_performance_insights=props[
                'rds_enable_performance_insights'],
            instance_type=ec2.InstanceType(
                instance_type_identifier=props['rds_instance_type'])),
        subnet_group=rds_subnetGroup,
        storage_encrypted=props['rds_storage_encrypted'],
        backup=rds.BackupProps(retention=core.Duration.days(
            props['rds_automated_backup_retention_days'])))

    EcsToRdsSeurityGroup = ec2.SecurityGroup(
        self,
        "EcsToRdsSeurityGroup",
        vpc=vpc,
        description="Allow WordPress containers to talk to RDS")

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_lambda/Function.html
    db_cred_generator = _lambda.Function(
        self,
        'db_creds_generator',
        runtime=_lambda.Runtime.PYTHON_3_8,
        handler='db_creds_generator.handler',
        code=_lambda.Code.asset('lambda/db_creds_generator'),
        vpc=vpc,
        vpc_subnets=ec2.SubnetSelection(
            subnet_type=ec2.SubnetType.ISOLATED
        ),  # vpc.select_subnets(subnet_type=ec2.SubnetType("ISOLATED")).subnets
        environment={
            'SECRET_NAME': rds_instance.secret.secret_name,
        })

    # Set permissions and security groups
    rds_instance.connections.allow_from(
        EcsToRdsSeurityGroup, ec2.Port.tcp(3306))  # Open hole to RDS in RDS SG

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_efs/FileSystem.html
    file_system = efs.FileSystem(
        self,
        "MyEfsFileSystem",
        vpc=vpc,
        encrypted=True,  # file system is not encrypted by default
        lifecycle_policy=props['efs_lifecycle_policy'],
        performance_mode=efs.PerformanceMode.GENERAL_PURPOSE,
        throughput_mode=efs.ThroughputMode.BURSTING,
        removal_policy=core.RemovalPolicy(props['efs_removal_policy']),
        enable_automatic_backups=props['efs_automatic_backups'])

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/Cluster.html?highlight=ecs%20cluster#aws_cdk.aws_ecs.Cluster
    cluster = ecs.Cluster(
        self,
        "Cluster",
        vpc=vpc,
        container_insights=props['ecs_enable_container_insights'])

    if props['deploy_bastion_host']:
        # ToDo: Deploy bastion host with a key file
        # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/BastionHostLinux.html
        bastion_host = ec2.BastionHostLinux(self, 'bastion_host', vpc=vpc)
        rds_instance.connections.allow_from(bastion_host,
                                            ec2.Port.tcp(3306))

    #######################
    ### Developer Tools ###
    # SFTP into the EFS shared file system

    NetToolsSecret = secretsmanager.Secret(
        self,
        "NetToolsSecret",
        generate_secret_string=secretsmanager.SecretStringGenerator(
            secret_string_template=json.dumps({
                "username": '******',
                "ip": ''
            }),
            generate_string_key="password",
            exclude_characters='/"'))
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_efs/FileSystem.html#aws_cdk.aws_efs.FileSystem.add_access_point
    AccessPoint = file_system.add_access_point(
        "access-point",
        path="/",
        create_acl=efs.Acl(
            owner_uid="100",  # https://aws.amazon.com/blogs/containers/developers-guide-to-using-amazon-efs-with-amazon-ecs-and-aws-fargate-part-2/
            owner_gid="101",
            permissions="0755"))

    EfsVolume = ecs.Volume(
        name="efs",
        efs_volume_configuration=ecs.EfsVolumeConfiguration(
            file_system_id=file_system.file_system_id,
            transit_encryption="ENABLED",
            authorization_config=ecs.AuthorizationConfig(
                access_point_id=AccessPoint.access_point_id)))

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateTaskDefinition.html
    NetToolsTask = ecs.FargateTaskDefinition(self,
                                             "TaskDefinition",
                                             cpu=256,
                                             memory_limit_mib=512,
                                             volumes=[EfsVolume])

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateTaskDefinition.html#aws_cdk.aws_ecs.FargateTaskDefinition.add_container
    NetToolsContainer = NetToolsTask.add_container(
        "NetTools",
        image=ecs.ContainerImage.from_registry('netresearch/sftp'),
        command=['test:test:100:101:efs'])
    NetToolsContainer.add_port_mappings(
        ecs.PortMapping(container_port=22, protocol=ecs.Protocol.TCP))

    NetToolsContainer.add_mount_points(
        ecs.MountPoint(
            container_path="/home/test/efs",  # ToDo: build path out with username from secret
            read_only=False,
            source_volume=EfsVolume.name,
        ))

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateService.html?highlight=fargateservice#aws_cdk.aws_ecs.FargateService
    service = ecs.FargateService(
        self,
        "Service",
        cluster=cluster,
        task_definition=NetToolsTask,
        platform_version=ecs.FargatePlatformVersion("VERSION1_4"),  # Required for EFS
    )
    # ToDo: somehow store the container's IP on deploy

    # Allow traffic to the EFS volume from the NetTools container
    service.connections.allow_to(file_system, ec2.Port.tcp(2049))
    # ToDo: allow the bastion host into the container on port 22

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_lambda/Function.html
    bastion_ip_locator = _lambda.Function(
        self,
        'bastion_ip_locator',
        function_name=
        f"{props['environment']}-{props['application']}-{props['unit']}-SFTP-IP",
        runtime=_lambda.Runtime.PYTHON_3_8,
        handler='bastion_ip_locator.handler',
        code=_lambda.Code.asset('lambda/bastion_ip_locator'),
        environment={
            'CLUSTER_NAME': cluster.cluster_arn,
            'SERVICE_NAME': service.service_name
        })

    # Give bastion_ip_locator the permissions it needs to read info from ECS.
    bastion_ip_locator.add_to_role_policy(
        iam.PolicyStatement(
            actions=["ecs:DescribeTasks"],
            resources=[
                # f"arn:aws:ecs:us-east-1:348757191778:service/{cluster.cluster_name}/{service.service_name}",
                f"arn:aws:ecs:us-east-1:348757191778:task/{cluster.cluster_name}/*"
            ]))
    bastion_ip_locator.add_to_role_policy(
        iam.PolicyStatement(actions=[
            "ecs:ListTasks",
        ],
                            resources=["*"],
                            conditions={
                                'ArnEquals': {
                                    'ecs:cluster': cluster.cluster_arn
                                }
                            }))

    self.output_props = props.copy()
    self.output_props["vpc"] = vpc
    self.output_props["rds_instance"] = rds_instance
    self.output_props["EcsToRdsSeurityGroup"] = EcsToRdsSeurityGroup
    self.output_props["file_system"] = file_system
    self.output_props["cluster"] = cluster
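# The output_props dict above supplies exactly the keys the WordPress service
# stack earlier in this collection reads (props['vpc'], props['rds_instance'],
# props['file_system'], props['EcsToRdsSeurityGroup'], ...). A hedged wiring
# sketch for an app entry point; the class names WordpressInfraStack and
# WordpressServiceStack and the initial_props dict are assumptions, not names
# from the original source:
app = core.App()
infra = WordpressInfraStack(app, "wordpress-infra", props=initial_props)
WordpressServiceStack(app, "wordpress-service", props=infra.output_props)
app.synth()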