def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
    """Demo stack: an ECS Fargate task whose app logs are shipped via FireLens."""
    super().__init__(scope, construct_id, **kwargs)

    # Networking and cluster.
    # TODO: even when max_azs is specified, only 2 AZs end up being used.
    vpc = ec2.Vpc(self, "ECSVPC", cidr='10.0.0.0/16')
    cluster = ecs.Cluster(self, "ECSCluster", vpc=vpc)

    # Task definition reusing pre-existing task/execution roles, imported by ARN.
    task_definition = ecs.FargateTaskDefinition(
        self, "ECSDemoTaskDefinition",
        task_role=iam.Role.from_role_arn(
            self, "fargate_task_role",
            "arn:aws-cn:iam::402202783068:role/ECS-Task-Role-Firelens"),
        execution_role=iam.Role.from_role_arn(
            self, "fargate_task_execution_role",
            "arn:aws-cn:iam::402202783068:role/ecsTaskExecutionRole"))

    # Volume shared between the app container and the log router.
    task_definition.add_volume(name="data")
    # Both containers mount the same log directory.
    logs_mount = ecs.MountPoint(container_path="/data/logs",
                                read_only=False,
                                source_volume="data")

    # Application container; its stdout/err goes through FireLens.
    app_container = task_definition.add_container(
        "AppContainer",
        image=ecs.ContainerImage.from_ecr_repository(
            ecr.Repository.from_repository_name(
                self, id="app-file-image", repository_name="app-file")),
        logging=ecs.FireLensLogDriver())
    app_container.add_mount_points(logs_mount)
    # app_container.add_port_mappings(ecs.PortMapping(container_port=80))

    # FireLens log router (Fluent Bit) with an extra config file baked into the image.
    fluentbit_container = ecs.FirelensLogRouter(
        self, "fluentbit_container",
        firelens_config=ecs.FirelensConfig(
            type=ecs.FirelensLogRouterType.FLUENTBIT,
            options=ecs.FirelensOptions(config_file_value="/extra.conf")),
        task_definition=task_definition,
        image=ecs.ContainerImage.from_ecr_repository(
            ecr.Repository.from_repository_name(
                self, id="log-router", repository_name="firelens-file")),
        logging=ecs.AwsLogDriver(
            stream_prefix="/ecs/firelens-fluentbit-demo/"))
    fluentbit_container.add_mount_points(logs_mount)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Factorio dedicated server on Fargate with game state persisted to EFS."""
    super().__init__(scope, id, **kwargs)

    volume_name = 'factorio'

    # Single-AZ VPC without NAT gateways to keep running costs minimal.
    self.vpc = ec2.Vpc(self, "vpc", max_azs=1, nat_gateways=0)

    # Durable save-game storage with automatic backups enabled.
    self.efs_fs = efs.FileSystem(self, 'Filesystem',
                                 vpc=self.vpc,
                                 enable_automatic_backups=True)

    self.ecs = ecs.Cluster(self, "Fargate", vpc=self.vpc)

    self.task_definition = ecs.FargateTaskDefinition(
        self, "Factorio",
        cpu=2048,
        memory_limit_mib=4096,
        volumes=[
            ecs.Volume(
                name=volume_name,
                efs_volume_configuration=ecs.EfsVolumeConfiguration(
                    file_system_id=self.efs_fs.file_system_id)),
        ])

    self.container = self.task_definition.add_container(
        "hello-world",
        image=ecs.ContainerImage.from_registry(name="factoriotools/factorio:stable"))
    self.container.add_mount_points(
        ecs.MountPoint(container_path="/factorio",
                       read_only=False,
                       source_volume=volume_name))

    # Game traffic is UDP/34197; RCON is TCP/27015.
    self.container.add_port_mappings(
        ecs.PortMapping(container_port=34197, host_port=34197, protocol=ecs.Protocol.UDP),
        ecs.PortMapping(container_port=27015, host_port=27015, protocol=ecs.Protocol.TCP))

    # Outputs useful for operating the server from the CLI.
    core.CfnOutput(self, "VPC", value=self.vpc.vpc_id)
    core.CfnOutput(self, "EFS", value=self.efs_fs.file_system_id)
    core.CfnOutput(self, "TaskDef", value=self.task_definition.task_definition_arn)
    core.CfnOutput(self, "Container", value=self.container.container_name)
def __init__(self, scope: core.Construct, id: str, cluster: ecs.ICluster,
             repo: ecr.IRepository, **kwargs) -> None:
    """Batch XML-processing service: private S3 bucket + Fargate task skeleton."""
    super().__init__(scope, id, **kwargs)

    # Private, server-side-encrypted bucket for the XML documents.
    self.xmlBucket = s3.Bucket(
        scope=self,
        id="XmlBucket",
        block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
        encryption=s3.BucketEncryption.S3_MANAGED)
    core.CfnOutput(scope=self, id="XmlBucketName", value=self.xmlBucket.bucket_name)

    # Task skeleton with a scratch volume for intermediate files.
    batch_task_definition = ecs.FargateTaskDefinition(
        scope=self,
        id="BatchTaskDef",
        cpu=2048,
        memory_limit_mib=4096,
        volumes=[ecs.Volume(name='storage')])

    batch_container = batch_task_definition.add_container(
        id="BatchContainer",
        image=ecs.ContainerImage.from_ecr_repository(repository=repo, tag='latest'),
        logging=ecs.LogDrivers.aws_logs(stream_prefix="BatchProcessing"),
        environment={'BUCKET': self.xmlBucket.bucket_name})
    batch_container.add_mount_points(
        ecs.MountPoint(container_path='/opt/data',
                       read_only=False,
                       source_volume='storage'))

    # NOTE(review): 's3:*' on the bucket is broader than the task likely needs —
    # consider narrowing to Get/Put/List actions.
    batch_task_definition.task_role.add_to_policy(
        statement=iam.PolicyStatement(
            resources=[self.xmlBucket.bucket_arn, self.xmlBucket.bucket_arn + '/*'],
            actions=['s3:*']))

    # Publish the container name so other stacks / pipelines can discover it.
    ssm.StringParameter(scope=self,
                        id='SSMParamBatchImageName',
                        string_value=batch_container.container_name,
                        parameter_name='image_batch')
def create_container(self, name, task, operators, volume, type="VANILLA"):
    """Add an itzg/minecraft-server container to *task* and return it.

    *operators* is passed through as the OPS env var; *volume* is the task
    volume that holds the world data; *type* selects the server flavor
    (parameter name kept for caller compatibility, although it shadows the
    builtin).
    """
    server_env = {
        "EULA": "TRUE",
        "OPS": operators,
        "ALLOW_NETHER": "true",
        "ENABLE_COMMAND_BLOCK": "true",
        "MAX_TICK_TIME": "60000",
        "MAX_MEMORY": "3600M",
        "TYPE": type,
    }
    container = task.add_container(
        name,
        image=ecs.ContainerImage.from_registry("itzg/minecraft-server"),
        essential=True,
        environment=server_env,
        logging=ecs.LogDrivers.aws_logs(
            stream_prefix=name,
            log_retention=logs.RetentionDays.ONE_WEEK,
        ))
    # Default Minecraft server port.
    container.add_port_mappings(ecs.PortMapping(container_port=25565))
    # World data lives on the shared task volume.
    container.add_mount_points(
        ecs.MountPoint(container_path="/data",
                       source_volume=volume.name,
                       read_only=False))
    return container
efs_to_connect="addr=" + efsdns +",nfsvers=4.0,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2" #efs_to_connect="addr=" + efsvol.file_system_id device_set=efsdns+":/" driveropts={ "type": "nfs", "device":device_set, "o": efs_to_connect #"o": "addr=fs-XXXXXX.efs.us-east-1.amazonaws.com,nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport" } docker_vol_config=ecs.DockerVolumeConfiguration(driver='local', scope=ecs.Scope.TASK, driver_opts=driveropts, labels=None) docker_volume=ecs.Volume(name='docker_vol',docker_volume_configuration=docker_vol_config) efs_mount=ecs.MountPoint(container_path='/efs',read_only=True, source_volume='docker_vol') cluster = ecs.Cluster( stack, "wes-onetest-ecs", vpc=vpc ) cluster.add_capacity("DefaultAutoScalingGroup", instance_type=ec2.InstanceType("c5.xlarge"), key_name='aws-eb',max_capacity=4,machine_image=amitouse, desired_capacity=2,min_capacity=2) # Create a task definition with its own elastic network interface iam.ServicePrincipal('task') task_definition_vistaweb = ecs.Ec2TaskDefinition( stack, "west-onetest-task-vistaweb",
def __init__(self, scope: core.Stack, id: str, cluster, vpc, worker, **kwargs) -> None:
    """Jenkins master service.

    Deploys the master either as an ALB-fronted Fargate service or as an EC2
    service with an EFS-backed home directory, depending on the config flags,
    then wires up master<->worker networking and the IAM permissions required
    by the Jenkins ECS plugin.
    """
    super().__init__(scope, id, **kwargs)
    self.cluster = cluster
    self.vpc = vpc
    self.worker = worker

    # Custom Jenkins master image built from the local Dockerfile.
    self.container_image = ecr.DockerImageAsset(
        self, "JenkinsMasterDockerImage", directory='./docker/master/')

    # Same container environment in both deployment modes: JCasC settings plus
    # everything the ECS plugin needs to spawn worker tasks.
    jenkins_env = {
        # https://github.com/jenkinsci/docker/blob/master/README.md#passing-jvm-parameters
        'JAVA_OPTS': '-Djenkins.install.runSetupWizard=false',
        # https://github.com/jenkinsci/configuration-as-code-plugin/blob/master/README.md#getting-started
        'CASC_JENKINS_CONFIG': '/config-as-code.yaml',
        'network_stack': self.vpc.stack_name,
        'cluster_stack': self.cluster.stack_name,
        'worker_stack': self.worker.stack_name,
        'cluster_arn': self.cluster.cluster.cluster_arn,
        'aws_region': config['DEFAULT']['region'],
        'jenkins_url': config['DEFAULT']['jenkins_url'],
        'subnet_ids': ",".join([x.subnet_id for x in self.vpc.vpc.private_subnets]),
        'security_group_ids': self.worker.worker_security_group.security_group_id,
        'execution_role_arn': self.worker.worker_execution_role.role_arn,
        'task_role_arn': self.worker.worker_task_role.role_arn,
        'worker_log_group': self.worker.worker_logs_group.log_group_name,
        'worker_log_stream_prefix': self.worker.worker_log_stream.log_stream_name,
    }

    # Fargate mode (also the default when EC2 mode is not explicitly enabled).
    if config['DEFAULT']['fargate_enabled'] == "yes" or not config['DEFAULT']['ec2_enabled'] == "yes":
        self.jenkins_task = ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
            image=ecs.ContainerImage.from_docker_image_asset(self.container_image),
            container_port=8080,
            enable_logging=True,
            environment=jenkins_env,
        )

        self.jenkins_master_service_main = ecs_patterns.ApplicationLoadBalancedFargateService(
            self, "JenkinsMasterService",
            cpu=int(config['DEFAULT']['fargate_cpu']),
            memory_limit_mib=int(config['DEFAULT']['fargate_memory_limit_mib']),
            cluster=self.cluster.cluster,
            desired_count=1,
            enable_ecs_managed_tags=True,
            task_image_options=self.jenkins_task,
            cloud_map_options=ecs.CloudMapOptions(
                name="master", dns_record_type=sd.DnsRecordType('A')))
        self.jenkins_master_service = self.jenkins_master_service_main.service
        self.jenkins_master_task = self.jenkins_master_service.task_definition

    # EC2 mode: hand-built ALB + task definition with an EFS host mount.
    if config['DEFAULT']['ec2_enabled'] == "yes":
        self.jenkins_load_balancer = elb.ApplicationLoadBalancer(
            self, "JenkinsMasterELB",
            vpc=self.vpc.vpc,
            internet_facing=True,
        )
        self.listener = self.jenkins_load_balancer.add_listener("Listener", port=80)

        self.jenkins_master_task = ecs.Ec2TaskDefinition(
            self, "JenkinsMasterTaskDef",
            network_mode=ecs.NetworkMode.AWS_VPC,
            volumes=[
                ecs.Volume(name="efs_mount", host=ecs.Host(source_path='/mnt/efs'))
            ],
        )
        self.jenkins_master_task.add_container(
            "JenkinsMasterContainer",
            image=ecs.ContainerImage.from_ecr_repository(self.container_image.repository),
            cpu=int(config['DEFAULT']['ec2_cpu']),
            memory_limit_mib=int(config['DEFAULT']['ec2_memory_limit_mib']),
            environment=jenkins_env,
            logging=ecs.LogDriver.aws_logs(
                stream_prefix="JenkinsMaster",
                log_retention=logs.RetentionDays.ONE_WEEK),
        )
        # Jenkins home lives on the EFS host mount so state survives restarts.
        self.jenkins_master_task.default_container.add_mount_points(
            ecs.MountPoint(container_path='/var/jenkins_home',
                           source_volume="efs_mount",
                           read_only=False))
        self.jenkins_master_task.default_container.add_port_mappings(
            ecs.PortMapping(container_port=8080, host_port=8080))

        self.jenkins_master_service = ecs.Ec2Service(
            self, "EC2MasterService",
            task_definition=self.jenkins_master_task,
            cloud_map_options=ecs.CloudMapOptions(
                name="master", dns_record_type=sd.DnsRecordType('A')),
            desired_count=1,
            min_healthy_percent=0,
            max_healthy_percent=100,
            enable_ecs_managed_tags=True,
            cluster=self.cluster.cluster,
        )

        self.target_group = self.listener.add_targets(
            "JenkinsMasterTarget",
            port=80,
            targets=[
                self.jenkins_master_service.load_balancer_target(
                    container_name=self.jenkins_master_task.default_container.container_name,
                    container_port=8080,
                )
            ],
            deregistration_delay=core.Duration.seconds(10))

    # Opening port 50000 for master <--> worker (JNLP) communications.
    self.jenkins_master_service.task_definition.default_container.add_port_mappings(
        ecs.PortMapping(container_port=50000, host_port=50000))

    # Allow workers to reach the master on the JNLP port...
    self.jenkins_master_service.connections.allow_from(
        other=self.worker.worker_security_group,
        port_range=ec2.Port(protocol=ec2.Protocol.TCP,
                            string_representation='Master to Worker 50000',
                            from_port=50000,
                            to_port=50000))
    # ...and on the web/API port.
    self.jenkins_master_service.connections.allow_from(
        other=self.worker.worker_security_group,
        port_range=ec2.Port(protocol=ec2.Protocol.TCP,
                            string_representation='Master to Worker 8080',
                            from_port=8080,
                            to_port=8080))

    # IAM statements that allow the Jenkins ECS plugin to talk to ECS and to
    # the Jenkins cluster.
    self.jenkins_master_task.add_to_task_role_policy(
        iam.PolicyStatement(
            actions=[
                "ecs:RegisterTaskDefinition",
                "ecs:DeregisterTaskDefinition",
                "ecs:ListClusters",
                "ecs:DescribeContainerInstances",
                "ecs:ListTaskDefinitions",
                "ecs:DescribeTaskDefinition",
                "ecs:DescribeTasks",
            ],
            resources=["*"],
        ))
    self.jenkins_master_task.add_to_task_role_policy(
        iam.PolicyStatement(actions=["ecs:ListContainerInstances"],
                            resources=[self.cluster.cluster.cluster_arn]))
    self.jenkins_master_task.add_to_task_role_policy(
        iam.PolicyStatement(
            actions=["ecs:RunTask"],
            resources=[
                "arn:aws:ecs:{0}:{1}:task-definition/fargate-workers*".format(
                    self.region,
                    self.account,
                )
            ]))
    # StopTask only for tasks that belong to our cluster.
    self.jenkins_master_task.add_to_task_role_policy(
        iam.PolicyStatement(
            actions=["ecs:StopTask"],
            resources=["arn:aws:ecs:{0}:{1}:task/*".format(self.region, self.account)],
            conditions={
                "ForAnyValue:ArnEquals": {
                    "ecs:cluster": self.cluster.cluster.cluster_arn
                }
            }))
    self.jenkins_master_task.add_to_task_role_policy(
        iam.PolicyStatement(actions=["iam:PassRole"],
                            resources=[
                                self.worker.worker_task_role.role_arn,
                                self.worker.worker_execution_role.role_arn
                            ]))
    # END OF JENKINS ECS PLUGIN IAM POLICIES #
    # NOTE(review): wildcard actions on the log group are broader than needed —
    # consider restricting to logs:* (or the specific read/write actions).
    self.jenkins_master_task.add_to_task_role_policy(
        iam.PolicyStatement(
            actions=["*"],
            resources=[self.worker.worker_logs_group.log_group_arn]))
def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
    """WordPress base infrastructure: VPC, Aurora MySQL, EFS, ECS cluster,
    plus developer tooling (SFTP container into EFS, bastion, helper Lambdas).
    Exposes created resources to downstream stacks via ``self.output_props``.
    """
    super().__init__(scope, id, **kwargs)

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/Vpc.html
    # Three tiers: public (ingress), private (containers), isolated (database).
    vpc = ec2.Vpc(
        self, "vpc",
        cidr=props['vpc_CIDR'],
        max_azs=3,
        subnet_configuration=[
            {'cidrMask': 28, 'name': 'public', 'subnetType': ec2.SubnetType.PUBLIC},
            {'cidrMask': 28, 'name': 'private', 'subnetType': ec2.SubnetType.PRIVATE},
            {'cidrMask': 28, 'name': 'db', 'subnetType': ec2.SubnetType.ISOLATED},
        ])

    rds_subnetGroup = rds.SubnetGroup(
        self, "rds_subnetGroup",
        description=f"Group for {props['environment']}-{props['application']}-{props['unit']} DB",
        vpc=vpc,
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.ISOLATED))

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_rds/DatabaseCluster.html
    ##TODO:ADD Aurora Serverless Option
    rds_instance = rds.DatabaseCluster(
        self, 'wordpress-db',
        engine=rds.DatabaseClusterEngine.aurora_mysql(
            version=rds.AuroraMysqlEngineVersion.VER_2_07_2),
        instances=1,
        instance_props=rds.InstanceProps(
            vpc=vpc,
            enable_performance_insights=props['rds_enable_performance_insights'],
            instance_type=ec2.InstanceType(
                instance_type_identifier=props['rds_instance_type'])),
        subnet_group=rds_subnetGroup,
        storage_encrypted=props['rds_storage_encrypted'],
        backup=rds.BackupProps(
            retention=core.Duration.days(props['rds_automated_backup_retention_days'])))

    EcsToRdsSeurityGroup = ec2.SecurityGroup(
        self, "EcsToRdsSeurityGroup",
        vpc=vpc,
        description="Allow WordPress containers to talk to RDS")

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_lambda/Function.html
    # Credential-generator Lambda runs inside the isolated subnets so it can
    # reach the database secret.
    db_cred_generator = _lambda.Function(
        self, 'db_creds_generator',
        runtime=_lambda.Runtime.PYTHON_3_8,
        handler='db_creds_generator.handler',
        code=_lambda.Code.asset('lambda/db_creds_generator'),
        vpc=vpc,
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.ISOLATED),
        environment={
            'SECRET_NAME': rds_instance.secret.secret_name,
        })

    # Set permissions and security groups: open MySQL from the ECS SG to RDS.
    rds_instance.connections.allow_from(EcsToRdsSeurityGroup, ec2.Port.tcp(3306))

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_efs/FileSystem.html
    file_system = efs.FileSystem(
        self, "MyEfsFileSystem",
        vpc=vpc,
        encrypted=True,  # file system is not encrypted by default
        lifecycle_policy=props['efs_lifecycle_policy'],
        performance_mode=efs.PerformanceMode.GENERAL_PURPOSE,
        throughput_mode=efs.ThroughputMode.BURSTING,
        removal_policy=core.RemovalPolicy(props['efs_removal_policy']),
        enable_automatic_backups=props['efs_automatic_backups'])

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/Cluster.html
    cluster = ecs.Cluster(
        self, "Cluster",
        vpc=vpc,
        container_insights=props['ecs_enable_container_insights'])

    if props['deploy_bastion_host']:
        # ToDo: Deploy bastion host with a key file
        # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/BastionHostLinux.html
        bastion_host = ec2.BastionHostLinux(self, 'bastion_host', vpc=vpc)
        rds_instance.connections.allow_from(bastion_host, ec2.Port.tcp(3306))

    #######################
    ### Developer Tools ###
    # SFTP access into the EFS shared file system.
    NetToolsSecret = secretsmanager.Secret(
        self, "NetToolsSecret",
        generate_secret_string=secretsmanager.SecretStringGenerator(
            secret_string_template=json.dumps({"username": '******', "ip": ''}),
            generate_string_key="password",
            exclude_characters='/"'))

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_efs/FileSystem.html#aws_cdk.aws_efs.FileSystem.add_access_point
    # POSIX owner per the AWS EFS-on-ECS developer guide (part 2).
    AccessPoint = file_system.add_access_point(
        "access-point",
        path="/",
        create_acl=efs.Acl(owner_uid="100",
                           owner_gid="101",
                           permissions="0755"))

    EfsVolume = ecs.Volume(
        name="efs",
        efs_volume_configuration=ecs.EfsVolumeConfiguration(
            file_system_id=file_system.file_system_id,
            transit_encryption="ENABLED",
            authorization_config=ecs.AuthorizationConfig(
                access_point_id=AccessPoint.access_point_id)))

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateTaskDefinition.html
    NetToolsTask = ecs.FargateTaskDefinition(
        self, "TaskDefinition",
        cpu=256,
        memory_limit_mib=512,
        volumes=[EfsVolume])

    NetToolsContainer = NetToolsTask.add_container(
        "NetTools",
        image=ecs.ContainerImage.from_registry('netresearch/sftp'),
        command=['test:test:100:101:efs'])
    NetToolsContainer.add_port_mappings(
        ecs.PortMapping(container_port=22, protocol=ecs.Protocol.TCP))
    NetToolsContainer.add_mount_points(
        ecs.MountPoint(
            container_path="/home/test/efs",  # ToDo build path out with username from secret
            read_only=False,
            source_volume=EfsVolume.name,
        ))

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateService.html
    service = ecs.FargateService(
        self, "Service",
        cluster=cluster,
        task_definition=NetToolsTask,
        platform_version=ecs.FargatePlatformVersion("VERSION1_4"),  # Required for EFS
    )
    # ToDo somehow store container's IP on deploy

    # Allow traffic to the EFS volume from the NetTools container.
    service.connections.allow_to(file_system, ec2.Port.tcp(2049))
    # ToDo allow bastion host into container on port 22

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_lambda/Function.html
    bastion_ip_locator = _lambda.Function(
        self, 'bastion_ip_locator',
        function_name=f"{props['environment']}-{props['application']}-{props['unit']}-SFTP-IP",
        runtime=_lambda.Runtime.PYTHON_3_8,
        handler='bastion_ip_locator.handler',
        code=_lambda.Code.asset('lambda/bastion_ip_locator'),
        environment={
            'CLUSTER_NAME': cluster.cluster_arn,
            'SERVICE_NAME': service.service_name
        })

    # Permissions for the IP-locator Lambda to read ECS task information.
    # NOTE(review): account/region are hard-coded in this ARN — consider
    # building it from self.region/self.account.
    bastion_ip_locator.add_to_role_policy(
        iam.PolicyStatement(
            actions=["ecs:DescribeTasks"],
            resources=[
                f"arn:aws:ecs:us-east-1:348757191778:task/{cluster.cluster_name}/*"
            ]))
    bastion_ip_locator.add_to_role_policy(
        iam.PolicyStatement(
            actions=["ecs:ListTasks"],
            resources=["*"],
            conditions={'ArnEquals': {'ecs:cluster': cluster.cluster_arn}}))

    # Hand the created resources to downstream stacks.
    self.output_props = props.copy()
    self.output_props["vpc"] = vpc
    self.output_props["rds_instance"] = rds_instance
    self.output_props["EcsToRdsSeurityGroup"] = EcsToRdsSeurityGroup
    self.output_props["file_system"] = file_system
    self.output_props["cluster"] = cluster
def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
    """Umccrise AWS Batch stack: IAM roles, spot compute environment, job
    queue/definition, and the job-submission Lambda.
    """
    super().__init__(scope, id, **kwargs)

    ################################################################################
    # Set up permissions
    ro_buckets = {
        s3.Bucket.from_bucket_name(self, bucket, bucket_name=bucket)
        for bucket in props['ro_buckets']
    }
    rw_buckets = {
        s3.Bucket.from_bucket_name(self, bucket, bucket_name=bucket)
        for bucket in props['rw_buckets']
    }

    batch_service_role = iam.Role(
        self, 'BatchServiceRole',
        assumed_by=iam.ServicePrincipal('batch.amazonaws.com'),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AWSBatchServiceRole')
        ])

    spotfleet_role = iam.Role(
        self, 'AmazonEC2SpotFleetRole',
        assumed_by=iam.ServicePrincipal('spotfleet.amazonaws.com'),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AmazonEC2SpotFleetTaggingRole')
        ])

    # Role assumed by the Batch compute instances themselves.
    batch_instance_role = iam.Role(
        self, 'BatchInstanceRole',
        role_name='UmccriseBatchInstanceRole',
        assumed_by=iam.CompositePrincipal(
            iam.ServicePrincipal('ec2.amazonaws.com'),
            iam.ServicePrincipal('ecs.amazonaws.com')),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AmazonEC2RoleforSSM'),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AmazonEC2ContainerServiceforEC2Role')
        ])
    # Instances manage their own scratch EBS volumes at runtime.
    batch_instance_role.add_to_policy(
        iam.PolicyStatement(
            actions=[
                "ec2:Describe*",
                "ec2:AttachVolume",
                "ec2:CreateVolume",
                "ec2:CreateTags",
                "ec2:ModifyInstanceAttribute"
            ],
            resources=["*"]))
    batch_instance_role.add_to_policy(
        iam.PolicyStatement(actions=["ecs:ListClusters"], resources=["*"]))
    for bucket in ro_buckets:
        bucket.grant_read(batch_instance_role)
    for bucket in rw_buckets:
        # restrict write to paths matching */umccrised/*
        bucket.grant_read_write(batch_instance_role, '*/umccrised/*')

    # Turn the instance role into an instance profile.
    batch_instance_profile = iam.CfnInstanceProfile(
        self, 'BatchInstanceProfile',
        instance_profile_name='UmccriseBatchInstanceProfile',
        roles=[batch_instance_role.role_name])

    ################################################################################
    # Minimal networking
    # TODO: import resource created with TF
    vpc = props['vpc']

    ################################################################################
    # Setup Batch compute resources

    # Configure BlockDevice to expand instance disk space (if needed?)
    block_device_mappings = [{
        'deviceName': '/dev/xvdf',
        'ebs': {
            'deleteOnTermination': True,
            'volumeSize': 1024,
            'volumeType': 'gp2'
        }
    }]

    launch_template = ec2.CfnLaunchTemplate(
        self, 'UmccriseBatchComputeLaunchTemplate',
        launch_template_name='UmccriseBatchComputeLaunchTemplate',
        launch_template_data={
            'userData': core.Fn.base64(user_data_script),
            'blockDeviceMappings': block_device_mappings
        })
    launch_template_spec = batch.LaunchTemplateSpecification(
        launch_template_name=launch_template.launch_template_name,
        version='$Latest')

    my_compute_res = batch.ComputeResources(
        type=batch.ComputeResourceType.SPOT,
        allocation_strategy=batch.AllocationStrategy.BEST_FIT_PROGRESSIVE,
        desiredv_cpus=0,
        maxv_cpus=128,
        minv_cpus=0,
        image=ec2.MachineImage.generic_linux(
            ami_map={'ap-southeast-2': props['compute_env_ami']}),
        launch_template=launch_template_spec,
        spot_fleet_role=spotfleet_role,
        instance_role=batch_instance_profile.instance_profile_name,
        vpc=vpc,
        # compute_resources_tags=core.Tag('Creator', 'Batch')
    )
    # XXX: How to add more than one tag above??
    # core.Tag.add(my_compute_res, 'Foo', 'Bar')

    my_compute_env = batch.ComputeEnvironment(
        self, 'UmccriseBatchComputeEnv',
        compute_environment_name="cdk-umccrise-batch-compute-env",
        service_role=batch_service_role,
        compute_resources=my_compute_res)

    job_queue = batch.JobQueue(
        self, 'UmccriseJobQueue',
        job_queue_name='cdk-umccrise_job_queue',
        compute_environments=[
            batch.JobQueueComputeEnvironment(
                compute_environment=my_compute_env, order=1)
        ],
        priority=10)

    # Container runs privileged and mounts host paths for scratch + wrapper.
    job_container = batch.JobDefinitionContainer(
        image=ecs.ContainerImage.from_registry(name=props['container_image']),
        vcpus=2,
        memory_limit_mib=2048,
        command=["/opt/container/umccrise-wrapper.sh", "Ref::vcpus"],
        mount_points=[
            ecs.MountPoint(container_path='/work',
                           read_only=False,
                           source_volume='work'),
            ecs.MountPoint(container_path='/opt/container',
                           read_only=True,
                           source_volume='container')
        ],
        volumes=[
            ecs.Volume(name='container',
                       host=ecs.Host(source_path='/opt/container')),
            ecs.Volume(name='work',
                       host=ecs.Host(source_path='/mnt'))
        ],
        privileged=True)

    job_definition = batch.JobDefinition(
        self, 'UmccriseJobDefinition',
        job_definition_name='cdk-umccrise-job-definition',
        parameters={'vcpus': '1'},
        container=job_container,
        timeout=core.Duration.hours(5))

    ################################################################################
    # Set up job submission Lambda

    lambda_role = iam.Role(
        self, 'UmccriseLambdaRole',
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AWSLambdaBasicExecutionRole'),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'AWSBatchFullAccess')  # TODO: restrict!
        ])
    for bucket in ro_buckets:
        bucket.grant_read(lambda_role)
    for bucket in rw_buckets:
        bucket.grant_read(lambda_role)

    # TODO: support dev/prod split, i.e. image being configurable on dev,
    # but fixed on prod; may need a default JobDefinition to be set up.
    lmbda.Function(
        self, 'UmccriseLambda',
        function_name='umccrise_batch_lambda',
        handler='umccrise.lambda_handler',
        runtime=lmbda.Runtime.PYTHON_3_7,
        code=lmbda.Code.from_asset('lambdas/umccrise'),
        environment={
            'JOBNAME_PREFIX': "UMCCRISE_",
            'JOBQUEUE': job_queue.job_queue_name,
            'REFDATA_BUCKET': props['refdata_bucket'],
            'DATA_BUCKET': props['data_bucket'],
            'UMCCRISE_MEM': '50000',
            'UMCCRISE_VCPUS': '16'
        },
        role=lambda_role)
def __init__(self, scope: core.Construct, id: str, region, domain, **kwargs) -> None:
    """On-demand Minecraft server: Fargate task started through an API Gateway
    endpoint, with EFS-backed world data and Route53 DNS updates.
    """
    super().__init__(scope, id, **kwargs)

    # VPC — we need one for the ECS cluster (sadly); reuse the account default.
    vpc = ec2.Vpc.from_lookup(self, 'vpc', is_default=True)
    cluster = ecs.Cluster(self, 'Cluster', vpc=vpc)

    # Route53 & SSL certificate.
    zone = dns.HostedZone(self, "dns", zone_name=domain)
    # Placeholder record; the starter Lambda rewrites it with the task's IP.
    dns.ARecord(self, 'MinecraftRecord',
                zone=zone,
                record_name='minecraft',
                target=dns.RecordTarget(values=['1.2.3.4']))
    cert = acm.Certificate(
        self, 'cert',
        domain_name=f'*.{domain}',
        validation=acm.CertificateValidation.from_dns(zone))

    # ECS (cluster, EFS, task definition).
    fs = efs.FileSystem(self, 'EFS',
                        vpc=vpc,
                        removal_policy=core.RemovalPolicy.DESTROY)
    task_definition = ecs.FargateTaskDefinition(self, 'TaskDef',
                                                memory_limit_mib=4096,
                                                cpu=1024)
    container = task_definition.add_container(
        'MinecraftDocker',
        image=ecs.ContainerImage.from_registry('darevee/minecraft-aws'),
        logging=ecs.AwsLogDriver(stream_prefix='Minecraf'),
        cpu=1024,
        memory_limit_mib=4096)
    container.add_mount_points(
        ecs.MountPoint(container_path='/minecraft',
                       source_volume='efs',
                       read_only=False))
    # EFS task volumes are patched in at the CloudFormation level (the L2
    # construct in this CDK version does not expose them directly).
    cfn_task = container.task_definition.node.default_child
    cfn_task.add_property_override("Volumes", [{
        "EFSVolumeConfiguration": {
            "FilesystemId": fs.file_system_id
        },
        "Name": "efs"
    }])
    container.add_port_mappings(ecs.PortMapping(container_port=25565))

    sg = ec2.SecurityGroup(self, 'sg', vpc=vpc)
    sg.add_ingress_rule(peer=ec2.Peer.any_ipv4(),
                        connection=ec2.Port.tcp(25565),
                        description='Minecraft Access')
    sg.add_ingress_rule(peer=ec2.Peer.any_ipv4(),
                        connection=ec2.Port.tcp(25575),
                        description='RCONN Access')
    fs.connections.allow_default_port_from(sg)

    subnets = ",".join(vpc.select_subnets().subnet_ids)

    # Cognito (for API Gateway authentication).
    userpool = cognito.UserPool(
        self, 'UserPool',
        user_invitation=cognito.UserInvitationConfig(
            email_body="""No cześć {username}, zostałeś zaproszony do naszego Minecraft!
Twoje tymczasowe hasło to {####}
""",
            email_subject="Zaproszenie do minecrafta"))

    # APIGW (gateway, Lambdas, S3 static content).
    # Starter Lambda: runs the Fargate task and points DNS at it.
    starter = _lambda.Function(
        self, 'Starter',
        runtime=_lambda.Runtime.PYTHON_3_8,
        handler='index.lambda_handler',
        code=_lambda.Code.asset('lambda/starter'),
        timeout=core.Duration.seconds(300),
        environment={
            'cluster': cluster.cluster_name,
            'subnets': subnets,
            'security_groups': sg.security_group_id,
            'task_definition': task_definition.task_definition_arn,
            'region': region,
            'zone_id': zone.hosted_zone_id,
            'domain': domain
        })
    starter.add_to_role_policy(
        iam.PolicyStatement(effect=iam.Effect.ALLOW,
                            resources=["*"],
                            actions=[
                                "ecs:ListTasks",
                                "ecs:DescribeTasks",
                                "ec2:DescribeNetworkInterfaces"
                            ]))
    starter.add_to_role_policy(
        iam.PolicyStatement(effect=iam.Effect.ALLOW,
                            resources=[task_definition.task_definition_arn],
                            actions=["ecs:RunTask", "ecs:DescribeTasks"]))
    starter.add_to_role_policy(
        iam.PolicyStatement(effect=iam.Effect.ALLOW,
                            resources=[
                                task_definition.task_role.role_arn,
                                task_definition.execution_role.role_arn
                            ],
                            actions=["iam:PassRole"]))
    starter.add_to_role_policy(
        iam.PolicyStatement(effect=iam.Effect.ALLOW,
                            resources=[zone.hosted_zone_arn],
                            actions=["route53:ChangeResourceRecordSets"]))

    # S3 static web page.
    bucket = s3.Bucket(self, "S3WWW",
                       public_read_access=True,
                       removal_policy=core.RemovalPolicy.DESTROY,
                       website_index_document="index.html")
    s3d.BucketDeployment(self, "S3Deploy",
                         destination_bucket=bucket,
                         sources=[s3d.Source.asset("static_page")])

    # Status Lambda: default API handler reporting server state.
    status = _lambda.Function(
        self, 'Status',
        runtime=_lambda.Runtime.PYTHON_3_8,
        handler='index.lambda_handler',
        code=_lambda.Code.asset('lambda/status'),
        environment={
            'url': f"https://minecrafter.{domain}",
            'domain': domain
        })

    # API Gateway with a custom domain; /start triggers the starter Lambda.
    apigw = api.LambdaRestApi(
        self, 'ApiGW',
        handler=status,
        proxy=False,
        domain_name={
            "domain_name": f'minecrafter.{domain}',
            "certificate": cert
        },
        default_cors_preflight_options={
            "allow_origins": api.Cors.ALL_ORIGINS,
            "allow_methods": api.Cors.ALL_METHODS
        })
    start = apigw.root.add_resource('start')
    start.add_method('ANY', integration=api.LambdaIntegration(starter))
    apigw.root.add_method('ANY')

    dns.ARecord(self, 'PointDNSToApiGW',
                zone=zone,
                target=dns.RecordTarget.from_alias(targets.ApiGateway(apigw)),
                record_name=f"minecrafter.{domain}")
def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc,
             cluster: ecs.Cluster, repository: ecr.Repository,
             **kwargs) -> None:
    """Deploy a two-service Elasticsearch setup on an existing ECS cluster.

    Provisions:
      * a private Cloud Map namespace ("private") used for node discovery,
      * one shared security group opening ports 9200/9300,
      * an "ES" service (master-eligible node) and a "NODE" service
        (data-only node), each with its own EC2 task definition and an
        internet-facing Application Load Balancer listening on port 80.

    :param vpc: VPC hosting the namespace, security group and ALBs.
    :param cluster: existing ECS cluster the two services are placed on.
    :param repository: ECR repository with the Elasticsearch image (tag "latest").
    """
    super().__init__(scope, id, **kwargs)

    # Private DNS namespace; the data node discovers the master through
    # "elastic.private" (see enable_cloud_map below).
    namespace = servicediscovery.PrivateDnsNamespace(
        scope=self,
        id="PRIVATE-DNS",
        vpc=vpc,
        name="private",
        description="a private dns"
    )

    # NOTE(review): 9200/9300 are opened to any IPv4 address here —
    # confirm this is intended rather than VPC-only access.
    sg = ec2.SecurityGroup(
        scope=self,
        id="SG",
        vpc=vpc,
        allow_all_outbound=True,
        description="open 9200 and 9300 ports",
        security_group_name="es-group"
    )
    sg.add_ingress_rule(
        peer=ec2.Peer.any_ipv4(),
        connection=ec2.Port.tcp(port=9200),  # Elasticsearch REST API
    )
    sg.add_ingress_rule(
        peer=ec2.Peer.any_ipv4(),
        connection=ec2.Port.tcp(port=9300),  # inter-node transport
    )

    #####################################################
    # Master-eligible node ("ES"); data is bind-mounted from the host.
    elastic_task_def = ecs.Ec2TaskDefinition(
        scope=self,
        id="ES-TASK-DEF",
        network_mode=ecs.NetworkMode.AWS_VPC,
        volumes=[ecs.Volume(
            name="esdata",
            host=ecs.Host(source_path="/usr/share/elasticsearch/data"),
        )],
    )
    elastic = ecs.ContainerDefinition(
        scope=self,
        id=constants.ES_CONTAINER_NAME,
        start_timeout=core.Duration.seconds(amount=30),
        task_definition=elastic_task_def,
        memory_limit_mib=4500,
        essential=True,
        image=ecs.ContainerImage.from_ecr_repository(
            repository=repository, tag='latest'),
        environment={
            "cluster.name": constants.ES_CLUSTER_NAME,
            "bootstrap.memory_lock": "true",
            # "discovery.zen.ping.unicast.hosts": "elasticsearch",
            "node.name": constants.ES_CONTAINER_NAME,
            "node.master": "true",
            "node.data": "true",
            "ES_JAVA_OPTS": "-Xms4g -Xmx4g",
        },
        logging=ecs.AwsLogDriver(
            stream_prefix="ES",
            log_retention=logs.RetentionDays.ONE_DAY,
        ),
    )
    # Elasticsearch needs a high file-descriptor limit and unlimited
    # memlock (pairs with bootstrap.memory_lock=true above).
    # NOTE(review): the master uses NOFILE 65535 but the data node below
    # uses 65536 — presumably unintended drift; confirm.
    elastic.add_ulimits(ecs.Ulimit(
        name=ecs.UlimitName.NOFILE,
        hard_limit=65535,
        soft_limit=65535))
    elastic.add_ulimits(ecs.Ulimit(
        name=ecs.UlimitName.MEMLOCK,
        hard_limit=-1,
        soft_limit=-1))
    elastic.add_port_mappings(ecs.PortMapping(container_port=9200))
    elastic.add_port_mappings(ecs.PortMapping(container_port=9300))
    elastic.add_mount_points(ecs.MountPoint(
        container_path="/usr/share/elasticsearch/data",
        source_volume="esdata",
        read_only=False,
    ))
    # elastic.add_volumes_from(ecs.VolumeFrom(
    #     source_container="esdata",
    #     read_only=False,
    # ))

    es_service = ecs.Ec2Service(
        scope=self,
        id="ES-SERVICE",
        cluster=cluster,
        task_definition=elastic_task_def,
        desired_count=1,
        service_name="ES",
        security_group=sg,
    )
    es_lb = elbv2.ApplicationLoadBalancer(
        scope=self,
        id="ES-ELB",
        vpc=vpc,
        internet_facing=True,
    )
    es_listener = es_lb.add_listener(
        id="ES-LISTENER",
        port=80,
    )
    es_service.register_load_balancer_targets(
        ecs.EcsTarget(
            new_target_group_id="ES-GRP",
            container_name=elastic.container_name,
            listener=ecs.ListenerConfig.application_listener(
                listener=es_listener,
                protocol=elbv2.ApplicationProtocol.HTTP),
        ))
    # Register the master in Cloud Map as "elastic.private" (A record) so
    # the data node's discovery setting below can resolve it.
    service = es_service.enable_cloud_map(
        cloud_map_namespace=namespace,
        dns_record_type=servicediscovery.DnsRecordType.A,
        # dns_ttl=core.Duration.seconds(amount=30),
        failure_threshold=1,
        name="elastic",
    )
    core.CfnOutput(
        scope=self,
        id="DNS-ES",
        value=es_lb.load_balancer_dns_name,
    )

    #####################################################
    # Data-only node ("NODE") — mirrors the master's task definition.
    node_task_def = ecs.Ec2TaskDefinition(
        scope=self,
        id="NODE-TASK-DEF",
        network_mode=ecs.NetworkMode.AWS_VPC,
        volumes=[ecs.Volume(
            name="esdata",
            host=ecs.Host(source_path="/usr/share/elasticsearch/data"),
        )],
    )
    node = ecs.ContainerDefinition(
        scope=self,
        id=constants.ES_NODE_CONTAINER_NAME,
        start_timeout=core.Duration.seconds(amount=40),
        task_definition=node_task_def,
        memory_limit_mib=4500,
        essential=True,
        image=ecs.ContainerImage.from_ecr_repository(
            repository=repository, tag='latest'),
        environment={
            "cluster.name": constants.ES_CLUSTER_NAME,
            "bootstrap.memory_lock": "true",
            # Joins the cluster via the master's Cloud Map name.
            "discovery.zen.ping.unicast.hosts": "elastic.private",
            "node.name": constants.ES_NODE_CONTAINER_NAME,
            "node.master": "false",
            "node.data": "true",
            "ES_JAVA_OPTS": "-Xms4g -Xmx4g",
        },
        logging=ecs.LogDrivers.aws_logs(
            stream_prefix="NODE",
            log_retention=logs.RetentionDays.ONE_DAY,
        ))
    node.add_port_mappings(ecs.PortMapping(container_port=9200))
    node.add_port_mappings(ecs.PortMapping(container_port=9300))
    node.add_ulimits(ecs.Ulimit(
        name=ecs.UlimitName.NOFILE,
        hard_limit=65536,
        soft_limit=65536))
    node.add_ulimits(ecs.Ulimit(
        name=ecs.UlimitName.MEMLOCK,
        hard_limit=-1,
        soft_limit=-1))
    node.add_mount_points(ecs.MountPoint(
        container_path="/usr/share/elasticsearch/data",
        source_volume="esdata",
        read_only=False,
    ))

    node_service = ecs.Ec2Service(
        scope=self,
        id="ES-NODE-SERVICE",
        cluster=cluster,
        task_definition=node_task_def,
        desired_count=1,
        service_name="NODE",
        security_group=sg,
    )
    node_lb = elbv2.ApplicationLoadBalancer(
        scope=self,
        id="NODE-ELB",
        vpc=vpc,
        internet_facing=True,
    )
    node_listener = node_lb.add_listener(
        id="NODE-LISTENER",
        port=80,
    )
    node_service.register_load_balancer_targets(
        ecs.EcsTarget(
            new_target_group_id="NODE-GRP",
            container_name=node.container_name,
            listener=ecs.ListenerConfig.application_listener(
                listener=node_listener,
                protocol=elbv2.ApplicationProtocol.HTTP),
        ))
    core.CfnOutput(
        scope=self,
        id="DNS-NODE",
        value=node_lb.load_balancer_dns_name,
    )
def __init__(self, scope: core.Construct, id_: str, props, **kwargs) -> None:
    """Deploy the htsget reference server on Fargate behind an internal ALB,
    exposed publicly through an API Gateway v2 HttpApi (VpcLink) with a
    Cognito JWT authorizer and a Route53 custom domain.

    :param props: dict with keys 'namespace', 'ecr_repo',
        'htsget_refserver_image_tag' and 'cors_allowed_origins'.
    """
    super().__init__(scope, id_, **kwargs)
    namespace = props['namespace']
    htsget_refserver_ecr_repo: ecr.Repository = props['ecr_repo']
    htsget_refserver_image_tag = props['htsget_refserver_image_tag']
    cors_allowed_origins = props['cors_allowed_origins']

    # --- Query deployment env specific config from SSM Parameter Store

    cert_apse2_arn = ssm.StringParameter.from_string_parameter_name(
        self,
        "SSLCertAPSE2ARN",
        string_parameter_name="/htsget/acm/apse2_arn",
    )
    cert_apse2 = acm.Certificate.from_certificate_arn(
        self,
        "SSLCertAPSE2",
        certificate_arn=cert_apse2_arn.string_value,
    )
    hosted_zone_id = ssm.StringParameter.from_string_parameter_name(
        self, "HostedZoneID", string_parameter_name="hosted_zone_id")
    hosted_zone_name = ssm.StringParameter.from_string_parameter_name(
        self, "HostedZoneName", string_parameter_name="hosted_zone_name")
    domain_name = ssm.StringParameter.from_string_parameter_name(
        self,
        "DomainName",
        string_parameter_name="/htsget/domain",
    )

    # --- Cognito parameters are from data portal terraform stack
    cog_user_pool_id = ssm.StringParameter.from_string_parameter_name(
        self,
        "CogUserPoolID",
        string_parameter_name="/data_portal/client/cog_user_pool_id",
    )
    cog_app_client_id_stage = ssm.StringParameter.from_string_parameter_name(
        self,
        "CogAppClientIDStage",
        string_parameter_name="/data_portal/client/cog_app_client_id_stage",
    )
    cog_app_client_id_local = ssm.StringParameter.from_string_parameter_name(
        self,
        "CogAppClientIDLocal",
        string_parameter_name="/data_portal/client/cog_app_client_id_local",
    )

    # --- Query main VPC and setup Security Groups
    vpc = ec2.Vpc.from_lookup(
        self,
        "VPC",
        vpc_name="main-vpc",
        tags={
            'Stack': "networking",
        },
    )
    private_subnets = ec2.SubnetSelection(
        subnet_type=ec2.SubnetType.PRIVATE,
        availability_zones=["ap-southeast-2a"],
    )

    sg_elb = ec2.SecurityGroup(
        self,
        "ELBSecurityGroup",
        vpc=vpc,
        description=f"Security Group for ELB in {namespace} stack",
        security_group_name=f"{namespace} ELB Security Group",
        allow_all_outbound=False,
    )
    sg_elb.add_ingress_rule(peer=ec2.Peer.any_ipv4(),
                            connection=ec2.Port.tcp(80),
                            description="Allow http inbound within VPC")
    sg_ecs_service = ec2.SecurityGroup(
        self,
        "ECSServiceSecurityGroup",
        vpc=vpc,
        description=f"Security Group for ECS Service in {namespace} stack",
        security_group_name=f"{namespace} ECS Security Group",
    )
    # Only the ALB may reach the service's application port.
    sg_ecs_service.add_ingress_rule(
        peer=sg_elb,
        connection=ec2.Port.tcp(3000),
        description="Allow traffic from Load balancer to ECS service")

    # --- Setup ECS Fargate cluster
    # Task-local scratch volume shared between sidecar and main container.
    config_vol = ecs.Volume(
        name="config-vol",
        host=ecs.Host(),
    )

    # NOTE(review): the same role is used as both task role and execution
    # role below — confirm the broad S3/SSM grants are intended for both.
    task_execution_role = iam.Role(
        self,
        "ecsTaskExecutionRole",
        assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"))
    task_execution_role.add_to_policy(
        iam.PolicyStatement(
            actions=[
                "s3:GetBucketLocation",
                "s3:GetObject",
                "s3:ListBucket",
                "s3:ListBucketMultipartUploads",
                "s3:ListMultipartUploadParts",
                "s3:GetObjectTagging",
                "s3:GetObjectVersionTagging",
                "logs:CreateLogStream",
                "logs:PutLogEvents",
                "ssm:GetParameterHistory",
                "ssm:GetParametersByPath",
                "ssm:GetParameters",
                "ssm:GetParameter",
            ],
            resources=["*"],
        ))
    task_execution_role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'service-role/AmazonECSTaskExecutionRolePolicy'))

    task = ecs.FargateTaskDefinition(
        self,
        f"{namespace}-task",
        cpu=512,
        memory_limit_mib=1024,
        volumes=[config_vol],
        task_role=task_execution_role,
        execution_role=task_execution_role,
    )

    # Sidecar fetches the server config from SSM into the shared volume,
    # then exits; the main container waits for its COMPLETE (see below).
    cmd_ssm = "ssm get-parameter --name '/htsget/refserver/config' --output text --query Parameter.Value"
    sidecar_container: ecs.ContainerDefinition = task.add_container(
        f"{namespace}-sidecar",
        image=ecs.ContainerImage.from_registry(
            "quay.io/victorskl/aws-cli:2.1.3"),
        essential=False,
        entry_point=[
            "/bin/bash",
            "-c",
            f"aws {cmd_ssm} > config.json",
        ],
        logging=ecs.LogDriver.aws_logs(stream_prefix=f"{namespace}", ),
    )
    sidecar_container.add_mount_points(
        ecs.MountPoint(
            container_path="/aws",
            read_only=False,
            source_volume=config_vol.name,
        ))

    main_container: ecs.ContainerDefinition = task.add_container(
        namespace,
        image=ecs.ContainerImage.from_ecr_repository(
            repository=htsget_refserver_ecr_repo,
            tag=htsget_refserver_image_tag,
        ),
        essential=True,
        command=[
            "./htsget-refserver", "-config",
            "/usr/src/app/config/config.json"
        ],
        logging=ecs.LogDriver.aws_logs(stream_prefix=f"{namespace}", ),
    )
    main_container.add_port_mappings(
        ecs.PortMapping(
            container_port=3000,
            protocol=ecs.Protocol.TCP,
        ))
    main_container.add_mount_points(
        ecs.MountPoint(
            container_path="/usr/src/app/config",
            read_only=True,
            source_volume=config_vol.name,
        ))
    # Start the server only after the sidecar wrote config.json.
    main_container.add_container_dependencies(
        ecs.ContainerDependency(
            container=sidecar_container,
            condition=ecs.ContainerDependencyCondition.COMPLETE,
        ))

    cluster = ecs.Cluster(self, f"{namespace}-cluster", vpc=vpc)
    service = ecs.FargateService(
        self,
        f"{namespace}-service",
        platform_version=ecs.FargatePlatformVersion.VERSION1_4,
        task_definition=task,
        cluster=cluster,
        vpc_subnets=private_subnets,
        desired_count=1,
        security_groups=[
            sg_ecs_service,
        ],
    )

    # --- Setup Application Load Balancer in front of ECS cluster
    lb = elbv2.ApplicationLoadBalancer(
        self,
        f"{namespace}-lb",
        vpc=vpc,
        internet_facing=False,  # reached only through the VpcLink below
        security_group=sg_elb,
        deletion_protection=True,
    )
    http_listener = lb.add_listener(
        "HttpLBListener",
        port=80,
    )
    health_check = elbv2.HealthCheck(interval=core.Duration.seconds(30),
                                     path="/reads/service-info",
                                     timeout=core.Duration.seconds(5))
    http_listener.add_targets(
        "LBtoECS",
        port=3000,
        protocol=elbv2.ApplicationProtocol.HTTP,
        targets=[service],
        health_check=health_check,
    )
    core.CfnOutput(self, "LoadBalancerDNS", value=lb.load_balancer_dns_name)

    # --- Setup APIGatewayv2 HttpApi using VpcLink private integration to ALB/ECS in private subnets
    vpc_link = apigwv2.VpcLink(self,
                               f"{namespace}-VpcLink",
                               vpc=vpc,
                               security_groups=[
                                   sg_ecs_service,
                                   sg_elb,
                               ])
    self.apigwv2_alb_integration = apigwv2i.HttpAlbIntegration(
        listener=http_listener,
        vpc_link=vpc_link,
    )
    custom_domain = apigwv2.DomainName(
        self,
        "CustomDomain",
        certificate=cert_apse2,
        domain_name=domain_name.string_value,
    )
    self.http_api = apigwv2.HttpApi(
        self,
        f"{namespace}-apigw",
        default_domain_mapping=apigwv2.DomainMappingOptions(
            domain_name=custom_domain),
        cors_preflight=apigwv2.CorsPreflightOptions(
            allow_origins=cors_allowed_origins,
            allow_headers=["*"],
            allow_methods=[
                apigwv2.CorsHttpMethod.ANY,
            ],
            allow_credentials=True,
        ))
    core.CfnOutput(self, "ApiEndpoint", value=self.http_api.api_endpoint)

    # --- Setup DNS for the custom domain
    hosted_zone = route53.HostedZone.from_hosted_zone_attributes(
        self,
        "HostedZone",
        hosted_zone_id=hosted_zone_id.string_value,
        zone_name=hosted_zone_name.string_value,
    )
    route53.ARecord(
        self,
        "ApiCustomDomainAlias",
        zone=hosted_zone,
        record_name="htsget",
        target=route53.RecordTarget.from_alias(
            route53t.ApiGatewayv2DomainProperties(
                regional_domain_name=custom_domain.regional_domain_name,
                regional_hosted_zone_id=custom_domain.
                regional_hosted_zone_id)),
    )
    core.CfnOutput(
        self,
        "HtsgetEndpoint",
        value=custom_domain.name,
    )

    # JWT authorizer validating Cognito-issued tokens; uses the L1 (Cfn)
    # construct since the L2 HttpApi has no authorizer support here.
    cognito_authzr = apigwv2.CfnAuthorizer(
        self,
        "CognitoAuthorizer",
        api_id=self.http_api.http_api_id,
        authorizer_type="JWT",
        identity_source=[
            "$request.header.Authorization",
        ],
        name="CognitoAuthorizer",
        jwt_configuration=apigwv2.CfnAuthorizer.JWTConfigurationProperty(
            audience=[
                cog_app_client_id_stage.string_value,
                cog_app_client_id_local.string_value,
            ],
            issuer=
            f"https://cognito-idp.{self.region}.amazonaws.com/{cog_user_pool_id.string_value}"
        ))

    # Add catch all routes, then attach the JWT authorizer by escaping to
    # the underlying CfnRoute.
    rt_catchall = apigwv2.HttpRoute(
        self,
        "CatchallRoute",
        http_api=self.http_api,
        route_key=apigwv2.HttpRouteKey.with_(
            path="/{proxy+}", method=apigwv2.HttpMethod.GET),
        integration=self.apigwv2_alb_integration)
    rt_catchall_cfn: apigwv2.CfnRoute = rt_catchall.node.default_child
    rt_catchall_cfn.authorizer_id = cognito_authzr.ref
    rt_catchall_cfn.authorization_type = "JWT"

    # Comment this to opt-out setting up experimental Passport + htsget
    self.setup_ga4gh_passport()
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Nightly EFS backup-runner stack.

    Creates a public-subnet-only VPC (no NAT cost), an encrypted EFS, a
    Fargate task that mounts the EFS and runs ./resources/backup_runner on
    a nightly schedule, an AWS Backup vault/plan for the EFS, and a
    CloudWatch log-error alarm delivered to an email-subscribed SNS topic.

    Fixes vs. previous revision: removed a duplicated COPY_JOB_FAILED
    notification event and corrected the schedule comment (the cron
    expression fires at 00:00 UTC, not "2am EST / 6am UTC").
    """
    super().__init__(scope, id, **kwargs)

    # Get config value for alert email; refuse to deploy the placeholder.
    email = self.node.try_get_context("email")
    if email == 'changeme@localhost':
        exit(
            'ERROR: Change the email in cdk.json or pass it with -c email=changeme@localhost'
        )

    # Create SNS topic for alarms to be sent to.
    alarm_topic = sns.Topic(self,
                            "backup_alarm",
                            display_name="backup_alarm")
    # Subscribe the configured email so the alarms reach a human.
    alarm_topic.add_subscription(subscriptions.EmailSubscription(email))

    # Create VPC to run everything in. We make this public just because we
    # don't want to spend $30/mo on a NAT gateway.
    vpc = ec2.Vpc(
        self,
        "VPC",
        nat_gateways=0,
        subnet_configuration=[
            ec2.SubnetConfiguration(name="public",
                                    subnet_type=ec2.SubnetType.PUBLIC)
        ],
    )

    ecs_sg = ec2.SecurityGroup(self, "ecs_sg", vpc=vpc)
    efs_sg = ec2.SecurityGroup(self, "efs_sg", vpc=vpc)
    # NFS (2049) from the backup-runner task's security group...
    efs_sg.add_ingress_rule(
        peer=ecs_sg,
        connection=ec2.Port.tcp(2049),
        description="Allow backup runner access",
    )
    # ...and open to the wider 10.0.0.0/8 range within the VPC.
    efs_sg.add_ingress_rule(
        peer=ec2.Peer.ipv4("10.0.0.0/8"),
        connection=ec2.Port.tcp(2049),
        description="Allow backup runner access",
    )

    # Define the EFS that gets backed up.
    fileSystem = efs.FileSystem(
        self,
        "MyEfsFileSystem",
        vpc=vpc,
        encrypted=True,
        lifecycle_policy=efs.LifecyclePolicy.AFTER_7_DAYS,
        performance_mode=efs.PerformanceMode.GENERAL_PURPOSE,
        throughput_mode=efs.ThroughputMode.BURSTING,
        security_group=efs_sg,
    )

    # Define the ECS task that mounts the EFS (transit-encrypted).
    cluster = ecs.Cluster(self, "Cluster", vpc=vpc)
    taskDefinition = ecs.FargateTaskDefinition(
        self,
        "taskDefinition",
        volumes=[
            ecs.Volume(
                name="efsvolume",
                efs_volume_configuration=ecs.EfsVolumeConfiguration(
                    file_system_id=fileSystem.file_system_id,
                    root_directory="/",
                    transit_encryption="ENABLED",
                ),
            )
        ],
        memory_limit_mib=8192,
        cpu=2048,
    )

    log_driver = ecs.AwsLogDriver(
        stream_prefix="backup_runner",
        log_retention=logs.RetentionDays.TWO_WEEKS,
    )

    taskDefinition.add_container(
        "backup-runner",
        image=ecs.ContainerImage.from_asset("./resources/backup_runner"),
        memory_limit_mib=8192,
        cpu=2048,
        logging=log_driver,
    )

    # The previous method to add the container doesn't let us specify the
    # mount point for the EFS, so we have to do it here, referencing the
    # container that was just added.
    taskDefinition.default_container.add_mount_points(
        ecs.MountPoint(container_path="/mnt/efs",
                       read_only=False,
                       source_volume="efsvolume"))

    # Create rule to trigger this be run every 24 hours.
    events.Rule(
        self,
        "scheduled_run",
        rule_name="backup_runner",
        # Fires at 00:00 UTC every night.
        schedule=events.Schedule.expression("cron(0 0 * * ? *)"),
        description="Starts the backup runner task every night",
        targets=[
            targets.EcsTask(
                cluster=cluster,
                task_definition=taskDefinition,
                subnet_selection=ec2.SubnetSelection(
                    subnet_type=ec2.SubnetType.PUBLIC),
                platform_version=ecs.FargatePlatformVersion.
                VERSION1_4,  # Required to use EFS
                # Because "Latest" does not yet support EFS
                security_groups=[ecs_sg],
            )
        ],
    )

    # Create notification topic for backups.
    # NOTE(review): this topic is never referenced again in this block —
    # confirm whether it is still needed.
    backup_topic = sns.Topic(self,
                             "backup_topic",
                             display_name="Backup status")

    # Create AWS Backup vault; the access policy denies destructive ops.
    vault = backup.BackupVault(
        self,
        "Vault",
        access_policy=iam.PolicyDocument(statements=[
            iam.PolicyStatement(
                effect=iam.Effect.DENY,
                actions=[
                    "backup:DeleteBackupVault",
                    "backup:DeleteRecoveryPoint",
                    "backup:UpdateRecoveryPointLifecycle",
                    # "backup:PutBackupVaultAccessPolicy",  # This results in "Failed putting policy for Backup vault backuprunnerVaultXXX as it will lock down from further policy changes"
                    "backup:DeleteBackupVaultAccessPolicy",
                    "backup:DeleteBackupVaultNotifications",
                    # "backup:PutBackupVaultNotifications",  # This causes other part of this app to fail.
                ],
                resources=["*"],
                principals=[iam.AnyPrincipal()],
            )
        ]),
        notification_topic=alarm_topic,
        notification_events=[
            # Monitor for some failures or access to the backups.
            backup.BackupVaultEvents.BACKUP_JOB_EXPIRED,
            backup.BackupVaultEvents.BACKUP_JOB_FAILED,
            backup.BackupVaultEvents.COPY_JOB_FAILED,
            backup.BackupVaultEvents.COPY_JOB_STARTED,
            backup.BackupVaultEvents.RESTORE_JOB_COMPLETED,
            backup.BackupVaultEvents.RESTORE_JOB_FAILED,
            backup.BackupVaultEvents.RESTORE_JOB_STARTED,
            backup.BackupVaultEvents.RESTORE_JOB_SUCCESSFUL,
        ],
    )
    plan = backup.BackupPlan.daily35_day_retention(self, "backup")
    plan.add_selection(
        "Selection",
        resources=[backup.BackupResource.from_efs_file_system(fileSystem)],
    )

    #
    # Create metric filter for errors in the CloudWatch Logs from the ECS.
    #
    METRIC_NAME = "log_errors"
    METRIC_NAMESPACE = "backup_runner"

    metric = cloudwatch.Metric(namespace=METRIC_NAMESPACE,
                               metric_name=METRIC_NAME)

    # Turns every "ERROR" log line from the runner into a metric data point.
    logs.MetricFilter(
        self,
        "MetricFilterId",
        metric_name=METRIC_NAME,
        metric_namespace=METRIC_NAMESPACE,
        log_group=log_driver.log_group,
        filter_pattern=logs.FilterPattern.any_term("ERROR"),
        metric_value="1",
    )

    error_alarm = cloudwatch.Alarm(
        self,
        "AlarmId",
        metric=metric,
        evaluation_periods=1,
        actions_enabled=True,
        alarm_name="backuper_runner_alarm",
        alarm_description="Errors in backup runner",
        comparison_operator=cloudwatch.ComparisonOperator.
        GREATER_THAN_OR_EQUAL_TO_THRESHOLD,
        treat_missing_data=cloudwatch.TreatMissingData.NOT_BREACHING,
        period=core.Duration.hours(1),
        threshold=1,
        statistic="sum",
    )

    # Connect the alarm to the SNS.
    error_alarm.add_alarm_action(cloudwatch_actions.SnsAction(alarm_topic))

    # The above doesn't give it privileges, so add them to the alarm topic
    # resource policy.
    alarm_topic.add_to_resource_policy(
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=["sns:Publish"],
            resources=[alarm_topic.topic_arn],
            principals=[iam.ServicePrincipal("cloudwatch.amazonaws.com")],
        ))
def __init__(self, scope: core.Construct, id: str, custom_vpc, efs_share,
             efs_ap_nginx, enable_container_insights: bool = False,
             **kwargs) -> None:
    """Run an nginx web service on Fargate with its document root on EFS,
    fronted by a public Application Load Balancer.

    :param custom_vpc: VPC the cluster and security group are created in.
    :param efs_share: EFS file system mounted as the nginx html root.
    :param efs_ap_nginx: EFS access point used for the mount.
    :param enable_container_insights: currently unused — the corresponding
        cluster argument is commented out below.
    """
    super().__init__(scope, id, **kwargs)

    # Create Security Group to allow Fargate Cluster instances to access EFS.
    web_svc_sg = _ec2.SecurityGroup(
        self,
        id="webSvcSecurityGroup",
        vpc=custom_vpc,
        security_group_name=f"web_svc_sg_{id}",
        description=
        "Security Group to allow Fargate Cluster instances to access EFS")

    # Allow Internet access to Fargate web service
    web_svc_sg.add_ingress_rule(
        _ec2.Peer.any_ipv4(),
        _ec2.Port.tcp(80),
        description="Allow Internet access to web service")

    # The code that defines your stack goes here
    fargate_cluster = _ecs.Cluster(
        self,
        "fargateClusterId",
        cluster_name=f"web-app-{id}",
        # container_insights=enable_container_insights,
        vpc=custom_vpc)

    web_app_task_def = _ecs.FargateTaskDefinition(
        self,
        "webAppTaskDef",
        cpu=256,
        memory_limit_mib=512,
    )

    # Add EFS Volume to TaskDef, authorized through the nginx access point.
    web_app_task_def.add_volume(
        name="html",
        efs_volume_configuration=_ecs.EfsVolumeConfiguration(
            file_system_id=efs_share.file_system_id,
            transit_encryption="ENABLED",
            authorization_config=_ecs.AuthorizationConfig(
                access_point_id=efs_ap_nginx.access_point_id)))

    web_app_container = web_app_task_def.add_container(
        "webAppContainer",
        cpu=256,
        memory_limit_mib=512,
        environment={
            "github": "https://github.com/miztiik",
            "ko_fi": "https://ko-fi.com/miztiik"
        },
        image=_ecs.ContainerImage.from_registry("nginx:latest"),
        logging=_ecs.LogDrivers.aws_logs(
            stream_prefix="mystique-automation-logs",
            log_retention=_logs.RetentionDays.ONE_DAY))

    # Raise the file-descriptor limit for the web server.
    web_app_container.add_ulimits(
        _ecs.Ulimit(name=_ecs.UlimitName.NOFILE,
                    soft_limit=65536,
                    hard_limit=65536))

    web_app_container.add_port_mappings(
        _ecs.PortMapping(container_port=80, protocol=_ecs.Protocol.TCP))
    web_app_container.add_port_mappings(
        _ecs.PortMapping(container_port=443, protocol=_ecs.Protocol.TCP))

    # Mount EFS Volume to Web Server Container
    web_app_container.add_mount_points(
        _ecs.MountPoint(container_path="/usr/share/nginx/html",
                        read_only=False,
                        source_volume="html"))

    # Launch service and attach load balancer using CDK Pattern.
    # Tasks stay private (assign_public_ip=False); only the ALB is public.
    web_app_service = _ecs_patterns.ApplicationLoadBalancedFargateService(
        self,
        "webSrv",
        platform_version=_ecs.FargatePlatformVersion.VERSION1_4,
        cluster=fargate_cluster,
        task_definition=web_app_task_def,
        assign_public_ip=False,
        public_load_balancer=True,
        listener_port=80,
        desired_count=1,
        # enable_ecs_managed_tags=True,
        health_check_grace_period=core.Duration.seconds(60),
        # cpu=1024,
        # memory_limit_mib=2048,
        # service_name="chatAppService",
    )

    # Outputs
    output_0 = core.CfnOutput(
        self,
        "AutomationFrom",
        value=f"{GlobalArgs.SOURCE_INFO}",
        description=
        "To know more about this automation stack, check out our github page."
    )
    output_1 = core.CfnOutput(
        self,
        "ClusterNameOutput",
        value=f"{fargate_cluster.cluster_name}",
        description=
        "To know more about this automation stack, check out our github page."
    )
    output_2 = core.CfnOutput(
        self,
        "webAppServiceUrl",
        value=
        f"http://{web_app_service.load_balancer.load_balancer_dns_name}",
        description=
        "Use an utility like curl or an browser to access the web server.")
def __init__(self, scope: core.Construct, construct_id: str,
             properties: WordpressStackProperties, **kwargs) -> None:
    """WordPress on Fargate: Aurora Serverless MySQL database, EFS web
    root shared by an nginx container and a PHP (WordPress) container,
    CPU-based autoscaling, and an HTTP listener on the supplied ALB.

    Fix vs. previous revision: the docker context path was built with
    plain string concatenation (dirname(__file__) + "../../src"), which
    yields a broken path like ".../stacks../../src"; os.path.join is used
    instead (identical result when dirname is empty).

    :param properties: carries the shared ``vpc`` and ``load_balancer``.
    """
    super().__init__(scope, construct_id, **kwargs)

    database = rds.ServerlessCluster(
        self,
        "WordpressServerless",
        engine=rds.DatabaseClusterEngine.AURORA_MYSQL,
        default_database_name="WordpressDatabase",
        vpc=properties.vpc,
        # auto_pause=0 disables automatic pausing of the cluster.
        scaling=rds.ServerlessScalingOptions(
            auto_pause=core.Duration.seconds(0)),
        deletion_protection=False,
        backup_retention=core.Duration.days(7),
        removal_policy=core.RemovalPolicy.DESTROY,
    )

    file_system = efs.FileSystem(
        self,
        "WebRoot",
        vpc=properties.vpc,
        performance_mode=efs.PerformanceMode.GENERAL_PURPOSE,
        throughput_mode=efs.ThroughputMode.BURSTING,
    )

    # docker context directory (repo's src/ two levels up from this file)
    docker_context_path = os.path.join(os.path.dirname(__file__), "..",
                                       "..", "src")

    # upload images to ecr
    nginx_image = ecr_assets.DockerImageAsset(
        self,
        "Nginx",
        directory=docker_context_path,
        file="Docker.nginx",
    )

    wordpress_image = ecr_assets.DockerImageAsset(
        self,
        "Php",
        directory=docker_context_path,
        file="Docker.wordpress",
    )

    cluster = ecs.Cluster(self,
                          'ComputeResourceProvider',
                          vpc=properties.vpc)

    # Web root shared between nginx (read-only) and WordPress (read-write).
    wordpress_volume = ecs.Volume(
        name="WebRoot",
        efs_volume_configuration=ecs.EfsVolumeConfiguration(
            file_system_id=file_system.file_system_id))

    event_task = ecs.FargateTaskDefinition(self,
                                           "WordpressTask",
                                           volumes=[wordpress_volume])

    #
    # webserver
    #
    nginx_container = event_task.add_container(
        "Nginx",
        image=ecs.ContainerImage.from_docker_image_asset(nginx_image))

    nginx_container.add_port_mappings(ecs.PortMapping(container_port=80))

    nginx_container_volume_mount_point = ecs.MountPoint(
        read_only=True,
        container_path="/var/www/html",
        source_volume=wordpress_volume.name)
    nginx_container.add_mount_points(nginx_container_volume_mount_point)

    #
    # application server
    #
    app_container = event_task.add_container(
        "Php",
        environment={
            'WORDPRESS_DB_HOST': database.cluster_endpoint.hostname,
            'WORDPRESS_TABLE_PREFIX': 'wp_'
        },
        # DB credentials come from the cluster's generated secret, never
        # from plaintext environment variables.
        secrets={
            'WORDPRESS_DB_USER':
            ecs.Secret.from_secrets_manager(database.secret,
                                            field="username"),
            'WORDPRESS_DB_PASSWORD':
            ecs.Secret.from_secrets_manager(database.secret,
                                            field="password"),
            'WORDPRESS_DB_NAME':
            ecs.Secret.from_secrets_manager(database.secret,
                                            field="dbname"),
        },
        image=ecs.ContainerImage.from_docker_image_asset(wordpress_image))
    app_container.add_port_mappings(ecs.PortMapping(container_port=9000))

    container_volume_mount_point = ecs.MountPoint(
        read_only=False,
        container_path="/var/www/html",
        source_volume=wordpress_volume.name)
    app_container.add_mount_points(container_volume_mount_point)

    #
    # create service
    #
    wordpress_service = ecs.FargateService(
        self,
        "InternalService",
        task_definition=event_task,
        platform_version=ecs.FargatePlatformVersion.VERSION1_4,
        cluster=cluster,
    )

    #
    # scaling
    #
    scaling = wordpress_service.auto_scale_task_count(min_capacity=2,
                                                      max_capacity=50)
    scaling.scale_on_cpu_utilization(
        "CpuScaling",
        target_utilization_percent=85,
        scale_in_cooldown=core.Duration.seconds(120),
        scale_out_cooldown=core.Duration.seconds(30),
    )

    #
    # network acl
    #
    database.connections.allow_default_port_from(wordpress_service,
                                                 "wordpress access to db")
    file_system.connections.allow_default_port_from(wordpress_service)

    #
    # external access
    #
    wordpress_service.connections.allow_from(
        other=properties.load_balancer, port_range=ec2.Port.tcp(80))

    http_listener = properties.load_balancer.add_listener(
        "HttpListener",
        port=80,
    )

    # 301/302 are healthy too: WordPress redirects until it is configured.
    http_listener.add_targets(
        "HttpServiceTarget",
        protocol=elbv2.ApplicationProtocol.HTTP,
        targets=[wordpress_service],
        health_check=elbv2.HealthCheck(healthy_http_codes="200,301,302"))
def setup_monitoring(self):
    """Deploy Prometheus, Pushgateway and Grafana as one Fargate task.

    Reads the Prometheus configuration from ./user_data/prometheus.yml,
    ships it into a shared task volume via a short-lived bootstrap
    container, and exposes all three services through the stack's NLB.

    Requires ``self.vpc``, ``self.sg`` and ``self.nlb`` to exist; sets
    ``self.mon_lb`` to the NLB DNS name (fix: the previous revision
    assigned an undefined name ``loadbalancer_dnsname``).
    """
    vpc = self.vpc
    sg = self.sg
    nlb = self.nlb

    # Prometheus scrape configuration, injected into the task at start-up.
    with open("./user_data/prometheus.yml") as f:
        prometheus_config = f.read()

    sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(9090),
                        'prometheus')
    sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(9100),
                        'prometheus node exporter')
    sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(9091),
                        'prometheus pushgateway')
    sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(3000), 'grafana')

    cluster = ecs.Cluster(self, "Monitoring", vpc=vpc)
    task = ecs.FargateTaskDefinition(self,
                                     id='MonitorTask',
                                     cpu=512,
                                     memory_limit_mib=2048)
    # Shared volume carrying the generated prometheus.yml between containers.
    task.add_volume(name='prom_config')

    # Bootstrap container: Fargate has no host bind mounts, so the config
    # is passed base64-encoded via an env var, decoded into the shared
    # volume, then the container exits.
    c_config = task.add_container(
        'config-prometheus',
        image=ecs.ContainerImage.from_registry('bash'),
        essential=False,
        logging=ecs.LogDriver.aws_logs(
            stream_prefix="mon_config_prometheus",
            log_retention=aws_logs.RetentionDays.ONE_DAY),
        command=[
            "-c",
            "echo $DATA | base64 -d - | tee /tmp/private/prometheus.yml"
        ],
        environment={'DATA': cdk.Fn.base64(prometheus_config)})
    c_config.add_mount_points(
        ecs.MountPoint(read_only=False,
                       container_path='/tmp/private',
                       source_volume='prom_config'))

    c_prometheus = task.add_container(
        'prometheus',
        essential=False,
        image=ecs.ContainerImage.from_registry('prom/prometheus'),
        port_mappings=[ecs.PortMapping(container_port=9090)],
        command=[
            "--config.file=/etc/prometheus/private/prometheus.yml",
            "--storage.tsdb.path=/prometheus",
            "--web.console.libraries=/usr/share/prometheus/console_libraries",
            "--web.console.templates=/usr/share/prometheus/consoles"
        ],
        logging=ecs.LogDriver.aws_logs(
            stream_prefix="mon_prometheus",
            log_retention=aws_logs.RetentionDays.ONE_DAY))
    c_prometheus.add_mount_points(
        ecs.MountPoint(read_only=False,
                       container_path='/etc/prometheus/private',
                       source_volume='prom_config'))
    # Prometheus must not start before its config file has been written.
    c_prometheus.add_container_dependencies(
        ecs.ContainerDependency(
            container=c_config,
            condition=ecs.ContainerDependencyCondition.COMPLETE))

    task.add_container(
        'pushgateway',
        essential=False,
        image=ecs.ContainerImage.from_registry('prom/pushgateway'),
        port_mappings=[ecs.PortMapping(container_port=9091)])
    task.add_container(
        'grafana',
        essential=True,
        image=ecs.ContainerImage.from_registry('grafana/grafana'),
        port_mappings=[ecs.PortMapping(container_port=3000)])

    service = ecs.FargateService(self,
                                 "EMQXMonitoring",
                                 security_group=self.sg,
                                 cluster=cluster,
                                 task_definition=task,
                                 desired_count=1,
                                 assign_public_ip=True)

    # One NLB listener per exposed service, forwarding to the matching
    # container in the task.
    listener_grafana = nlb.add_listener('grafana', port=3000)
    listener_prometheus = nlb.add_listener('prometheus', port=9090)
    listener_pushgateway = nlb.add_listener('pushgateway', port=9091)

    listener_grafana.add_targets(
        id='grafana',
        port=3000,
        targets=[
            service.load_balancer_target(container_name="grafana",
                                         container_port=3000)
        ])
    listener_prometheus.add_targets(
        id='prometheus',
        port=9090,
        targets=[
            service.load_balancer_target(container_name="prometheus",
                                         container_port=9090)
        ])
    listener_pushgateway.add_targets(
        id='pushgateway',
        port=9091,
        targets=[
            service.load_balancer_target(container_name="pushgateway",
                                         container_port=9091)
        ])

    # Expose the NLB DNS name for the outputs below (and for callers).
    self.mon_lb = nlb.load_balancer_dns_name

    core.CfnOutput(self, "Monitoring Grafana",
                   value="%s:%d" % (self.mon_lb, 3000))
    core.CfnOutput(self, "Monitoring Prometheus",
                   value="%s:%d" % (self.mon_lb, 9090))
def __init__(self, scope: core.Construct, construct_id: str, *,
             secrets: List[Secret]):
    """Run the duckbot container on a single-instance ECS EC2 cluster with
    a linked postgres container whose data lives on EFS.

    :param secrets: secret descriptors; each is resolved from an SSM
        SecureString parameter and injected into the duckbot container as
        an environment secret.
    """
    super().__init__(scope, construct_id)

    vpc = aws_ec2.Vpc(
        self,
        "Vpc",
        enable_dns_support=True,
        enable_dns_hostnames=True,
        max_azs=3,
        nat_gateways=0,
        subnet_configuration=[
            aws_ec2.SubnetConfiguration(
                name="Public", subnet_type=aws_ec2.SubnetType.PUBLIC)
        ],
    )

    postgres_volume_name = "duckbot_dbdata"
    file_system = aws_efs.FileSystem(
        self,
        "PostgresFileSystem",
        vpc=vpc,
        encrypted=True,
        file_system_name=postgres_volume_name,
        removal_policy=core.RemovalPolicy.DESTROY)
    file_system.node.default_child.override_logical_id(
        "FileSystem"
    )  # rename for compatibility with legacy cloudformation template

    # BRIDGE network mode so duckbot can reach postgres via a legacy
    # container link (see duckbot.add_link below).
    task_definition = aws_ecs.TaskDefinition(
        self,
        "TaskDefinition",
        compatibility=aws_ecs.Compatibility.EC2,
        family="duckbot",
        memory_mib="960",
        network_mode=aws_ecs.NetworkMode.BRIDGE)

    postgres_data_path = "/data/postgres"
    postgres = task_definition.add_container(
        "postgres",
        container_name="postgres",
        image=aws_ecs.ContainerImage.from_registry("postgres:13.2"),
        essential=False,
        # NOTE(review): credential values appear redacted in this source.
        environment={
            "POSTGRES_USER": "******",
            "POSTGRES_PASSWORD": "******",
            "PGDATA": postgres_data_path,
        },
        health_check=aws_ecs.HealthCheck(
            command=["CMD", "pg_isready", "-U", "duckbot"],
            interval=core.Duration.seconds(30),
            timeout=core.Duration.seconds(5),
            retries=3,
            start_period=core.Duration.seconds(30),
        ),
        logging=aws_ecs.LogDriver.aws_logs(
            stream_prefix="ecs",
            log_retention=aws_logs.RetentionDays.ONE_MONTH),
        memory_reservation_mib=128,
    )
    # Persist PGDATA on the EFS so the database survives task restarts.
    task_definition.add_volume(
        name=postgres_volume_name,
        efs_volume_configuration=aws_ecs.EfsVolumeConfiguration(
            file_system_id=file_system.file_system_id, root_directory="/"))
    postgres.add_mount_points(
        aws_ecs.MountPoint(source_volume=postgres_volume_name,
                           container_path=postgres_data_path,
                           read_only=False))

    secrets_as_parameters = {
        # note, parameter version is required by cdk, but does not make it into the template; specify version 1 for simplicity
        x.environment_name:
        aws_ssm.StringParameter.from_secure_string_parameter_attributes(
            self,
            x.environment_name,
            parameter_name=x.parameter_name,
            version=1)
        for x in secrets
    }
    duckbot = task_definition.add_container(
        "duckbot",
        container_name="duckbot",
        essential=True,
        image=aws_ecs.ContainerImage.from_registry(
            self.node.try_get_context("duckbot_image")),
        environment={"STAGE": "prod"},
        secrets={
            k: aws_ecs.Secret.from_ssm_parameter(v)
            for k, v in secrets_as_parameters.items()
        },
        health_check=aws_ecs.HealthCheck(
            command=["CMD", "python", "-m", "duckbot.health"],
            interval=core.Duration.seconds(30),
            timeout=core.Duration.seconds(10),
            retries=3,
            start_period=core.Duration.seconds(30),
        ),
        logging=aws_ecs.LogDriver.aws_logs(
            stream_prefix="ecs",
            log_retention=aws_logs.RetentionDays.ONE_MONTH),
        memory_reservation_mib=128,
    )
    # Legacy bridge-mode link so duckbot resolves the db as "postgres".
    duckbot.add_link(postgres)

    # Single t2.micro instance provides the cluster capacity.
    asg = aws_autoscaling.AutoScalingGroup(
        self,
        "AutoScalingGroup",
        min_capacity=0,
        max_capacity=1,
        desired_capacity=1,
        machine_image=aws_ecs.EcsOptimizedImage.amazon_linux2(),
        instance_type=aws_ec2.InstanceType("t2.micro"),
        key_name="duckbot",  # needs to be created manually
        instance_monitoring=aws_autoscaling.Monitoring.BASIC,
        vpc=vpc,
    )
    asg.connections.allow_to_default_port(file_system)
    # NOTE(review): SSH/HTTP/HTTPS are open to any IPv4 — confirm intended.
    asg.connections.allow_from(aws_ec2.Peer.any_ipv4(),
                               aws_ec2.Port.tcp(22))
    asg.connections.allow_from(aws_ec2.Peer.any_ipv4(),
                               aws_ec2.Port.tcp(80))
    asg.connections.allow_from(aws_ec2.Peer.any_ipv4(),
                               aws_ec2.Port.tcp(443))

    cluster = aws_ecs.Cluster(self,
                              "Cluster",
                              cluster_name="duckbot",
                              vpc=vpc)
    cluster.add_asg_capacity_provider(
        aws_ecs.AsgCapacityProvider(cluster,
                                    "AsgCapacityProvider",
                                    auto_scaling_group=asg),
        can_containers_access_instance_role=True)

    # min_healthy_percent=0 lets the single task be replaced in place
    # (there is only one instance, so no room for a second copy).
    aws_ecs.Ec2Service(
        self,
        "Service",
        service_name="duckbot",
        cluster=cluster,
        task_definition=task_definition,
        desired_count=1,
        min_healthy_percent=0,
        max_healthy_percent=100,
    )
def __init__(self, scope: core.Construct, id: str, datalake: DataLakeLayer, **kwargs) -> None:
    """Deploy a SonarQube server on Fargate, backed by an Aurora Postgres
    cluster and persisted onto the data lake's EFS file system.

    Args:
        scope: Parent CDK construct.
        id: Logical id of this construct within *scope*.
        datalake: Shared data-lake layer providing the VPC, the EFS file
            system and its security group.
    """
    super().__init__(scope, id, **kwargs)
    # NOTE(review): stored under a name-mangled private attribute but read
    # back below via `self.datalake` — presumably a property defined on this
    # class outside this view; verify.
    self.__datalake = datalake
    self.security_group = ec2.SecurityGroup(
        self,
        'SecurityGroup',
        vpc=self.datalake.vpc,
        allow_all_outbound=True,
        description='SonarQube Security Group')
    # NOTE(review): this admits ALL traffic from ANY IPv4 address — confirm
    # that this wide-open ingress is intended.
    self.security_group.add_ingress_rule(peer=ec2.Peer.any_ipv4(),
                                         connection=ec2.Port.all_traffic(),
                                         description='Allow any traffic')

    # Local Docker images, built and pushed to ECR at deploy time.
    self.sonarqube_svr_ecr = ecr.DockerImageAsset(
        self,
        'Repo',
        directory=os.path.join(root_dir, 'images/sonarqube-server'),
        repository_name='sonarqube')
    self.sonarqube_cli_ecr = ecr.DockerImageAsset(
        self,
        'Cli',
        directory=os.path.join(root_dir, 'images/sonarqube-scanner'),
        repository_name='sonarqube-cli')

    # NOTE(review): credentials are hard-coded in plaintext here; consider
    # rds.Credentials.from_generated_secret so the password lives in
    # Secrets Manager instead of the template.
    self.database = rds.DatabaseCluster(
        self,
        'Database',
        engine=rds.DatabaseClusterEngine.aurora_postgres(
            version=rds.AuroraPostgresEngineVersion.VER_11_9),
        default_database_name='sonarqube',
        removal_policy=core.RemovalPolicy.DESTROY,
        credentials=rds.Credentials.from_username(
            username='******',
            password=core.SecretValue(value='postgres')),
        instance_props=rds.InstanceProps(
            vpc=self.datalake.vpc,
            security_groups=[self.security_group],
            instance_type=ec2.InstanceType('r6g.xlarge')))

    # Earlier EC2-backed variant, kept for reference:
    # self.ecs_cluster = ecs.Cluster(self,'SonarCluster',
    #     container_insights=True,
    #     vpc=self.datalake.vpc,
    #     capacity=ecs.AddCapacityOptions(
    #         machine_image_type= ecs.MachineImageType.AMAZON_LINUX_2,
    #         instance_type=ec2.InstanceType('m5.xlarge'),
    #         allow_all_outbound=True,
    #         associate_public_ip_address=False,
    #         vpc_subnets= ec2.SubnetSelection(subnet_type= ec2.SubnetType.PUBLIC),
    #         desired_capacity=2))
    # self.service = ecsp.ApplicationLoadBalancedEc2Service(self,'SonarEc2',
    #     cluster=self.ecs_cluster,
    #     desired_count=1,
    #     listener_port=80,
    #     memory_reservation_mib= 4 * 1024,
    #     task_image_options= ecsp.ApplicationLoadBalancedTaskImageOptions(
    #         image= ecs.ContainerImage.from_docker_image_asset(asset=self.sonarqube_svr_ecr),
    #         container_name='sonarqube-svr',
    #         container_port=9000,
    #         enable_logging=True,
    #         environment={
    #             '_SONAR_JDBC_URL':'jdbc:postgresql://{}/sonarqube'.format(
    #                 self.database.cluster_endpoint.hostname),
    #             '_SONAR_JDBC_USERNAME':'******',
    #             '_SONAR_JDBC_PASSWORD':'******'
    #         }))

    self.service = ecsp.ApplicationLoadBalancedFargateService(
        self,
        'Server',
        assign_public_ip=True,
        vpc=self.datalake.vpc,
        desired_count=1,
        cpu=4096,
        memory_limit_mib=8 * 1024,
        listener_port=80,
        # Platform 1.4 is needed for EFS volume support on Fargate.
        platform_version=ecs.FargatePlatformVersion.VERSION1_4,
        security_groups=[self.security_group, self.datalake.efs_sg],
        task_image_options=ecsp.ApplicationLoadBalancedTaskImageOptions(
            image=ecs.ContainerImage.from_docker_image_asset(
                asset=self.sonarqube_svr_ecr),
            container_name='sonarqube-svr',
            container_port=9000,
            enable_logging=True,
            environment={
                '_SONAR_JDBC_URL':
                'jdbc:postgresql://{}/sonarqube'.format(
                    self.database.cluster_endpoint.hostname),
                '_SONAR_JDBC_USERNAME': '******',
                '_SONAR_JDBC_PASSWORD': '******'
            }))

    # Let the task mount the EFS file system.
    for name in ['AmazonElasticFileSystemClientFullAccess']:
        self.service.task_definition.task_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(name))

    # Override container specific settings
    container = self.service.task_definition.default_container

    # Required to start remote sql
    container.add_ulimits(
        ecs.Ulimit(name=ecs.UlimitName.NOFILE,
                   soft_limit=262145,
                   hard_limit=262145))

    # One EFS access point + task volume + mount per SonarQube data directory.
    for folder in ['data', 'logs']:
        efs_ap = self.datalake.efs.add_access_point(
            'sonarqube-' + folder,
            create_acl=efs.Acl(owner_gid="0",
                               owner_uid="0",
                               permissions="777"),
            path='/sonarqube/' + folder)
        self.service.task_definition.add_volume(
            name=folder,
            efs_volume_configuration=ecs.EfsVolumeConfiguration(
                file_system_id=self.datalake.efs.file_system_id,
                transit_encryption='ENABLED',
                authorization_config=ecs.AuthorizationConfig(
                    access_point_id=efs_ap.access_point_id,
                    iam='DISABLED')))
        container.add_mount_points(
            ecs.MountPoint(container_path='/opt/sonarqube/' + folder,
                           source_volume=folder,
                           read_only=False))
def __init__(self, scope: core.Construct, construct_id: str, props, **kwargs) -> None:
    """Deploy WordPress on ECS Fargate behind an HTTPS application load balancer.

    Creates a per-site EFS access point, a generated database-connection
    secret, a Fargate task whose container mounts the EFS volume, and an
    auto-scaled, load-balanced service wired to Route53 and RDS.

    Args:
        scope: Parent CDK construct.
        construct_id: Logical id of this construct within *scope*.
        props: Configuration dict; expected keys include ``file_system``,
            ``vpc``, ``rds_instance``, ``IdentifierName``, the ``ecs_*``
            sizing/option keys, ``domain_name``/``domain_zone``/``zone_name``
            and ``EcsToRdsSeurityGroup``.
    """
    super().__init__(scope, construct_id, **kwargs)

    #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_efs/FileSystem.html#aws_cdk.aws_efs.FileSystem.add_access_point
    #Access points allow multiple WordPress file systems to live on the same EFS Volume
    #The more data on an EFS volume the better it will perform
    #This provides a high level of security while also optimizing performance
    AccessPoint = props['file_system'].add_access_point(
        "local-access-point",
        path=f"/{props['IdentifierName']}",
        create_acl=efs.Acl(
            owner_uid="100",  #https://aws.amazon.com/blogs/containers/developers-guide-to-using-amazon-efs-with-amazon-ecs-and-aws-fargate-part-2/
            owner_gid="101",
            permissions="0755"))

    #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/Cluster.html?highlight=ecs%20cluster#aws_cdk.aws_ecs.Cluster
    cluster = ecs.Cluster(
        self,
        "Cluster",
        vpc=props['vpc'],
        container_insights=props['ecs_enable_container_insights'])

    #Get needed secrets
    #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ssm/StringParameter.html?highlight=from_secure_string_parameter_attributes#aws_cdk.aws_ssm.StringParameter.from_secure_string_parameter_attributes
    # ParameterStoreTest = ssm.StringParameter.from_secure_string_parameter_attributes( self, "ParameterStoreTest",
    #     parameter_name="", #Remember, KMS permissions for task execution role for parameter store key!
    #     version=1
    # )

    #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/Secret.html
    #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_secretsmanager/SecretStringGenerator.html
    # Template carries the db host; Secrets Manager generates the password.
    dbtest = {
        "database_name": '',
        "username": '',
        "host": str(props["rds_instance"].cluster_endpoint.hostname)
    }
    WordpressDbConnectionSecret = secretsmanager.Secret(
        self,
        "WordpressDbConnectionSecret",
        generate_secret_string=secretsmanager.SecretStringGenerator(
            secret_string_template=json.dumps(dbtest),
            generate_string_key="password",
            exclude_characters='/"'))
    #ToDO: Lambda call to populate secrets but only

    #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/Volume.html#aws_cdk.aws_ecs.Volume
    WordpressEfsVolume = ecs.Volume(
        name="efs",
        efs_volume_configuration=ecs.EfsVolumeConfiguration(
            file_system_id=props['file_system'].file_system_id,
            transit_encryption="ENABLED",
            authorization_config=ecs.AuthorizationConfig(
                access_point_id=AccessPoint.access_point_id)))

    #Create Task Definition
    #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateTaskDefinition.html
    WordpressTask = ecs.FargateTaskDefinition(
        self,
        "TaskDefinition",
        cpu=props['ecs_cpu_size'],
        memory_limit_mib=props['ecs_memory_size'],
        volumes=[WordpressEfsVolume])

    #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateTaskDefinition.html#aws_cdk.aws_ecs.FargateTaskDefinition.add_container
    WordpressContainer = WordpressTask.add_container(
        "Wordpress",
        image=ecs.ContainerImage.from_ecr_repository(
            repository=ecr.Repository.from_repository_name(
                self,
                "wpimage",
                repository_name=props['ecs_container_repo_name']),
            tag=props['ecs_container_tag']),
        logging=ecs.LogDriver.aws_logs(
            stream_prefix="container",
            #log_group = "{props['environment']}/{props['unit']}/{props['application']}", #ToDo make sure I like log group name
            log_retention=logs.RetentionDays(
                props['ecs_log_retention_period'])),
        environment={
            "TROUBLESHOOTING_MODE_ENABLED":
            props['TROUBLESHOOTING_MODE_ENABLED']
        },
        secrets={
            # "PARAMETERSTORETEST": ecs.Secret.from_ssm_parameter( ParameterStoreTest ),
            "DBHOST":
            ecs.Secret.from_secrets_manager(WordpressDbConnectionSecret,
                                            "host"),
            "DBUSER":
            ecs.Secret.from_secrets_manager(WordpressDbConnectionSecret,
                                            "username"),
            "DBUSERPASS":
            ecs.Secret.from_secrets_manager(WordpressDbConnectionSecret,
                                            "password"),
            "DBNAME":
            ecs.Secret.from_secrets_manager(WordpressDbConnectionSecret,
                                            "database_name")
        },
    )

    #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/ContainerDefinition.html?highlight=add_port_mappings#aws_cdk.aws_ecs.ContainerDefinition.add_port_mappings
    WordpressContainer.add_port_mappings(
        ecs.PortMapping(container_port=80, protocol=ecs.Protocol.TCP))

    #https://gist.github.com/phillippbertram/ee312b09c3982d76b9799653ed6d6201
    WordpressContainer.add_mount_points(
        ecs.MountPoint(container_path=props['ecs_container_efs_path'],
                       read_only=False,
                       source_volume=WordpressEfsVolume.name))

    #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs_patterns/ApplicationLoadBalancedFargateService.html
    # NOTE: enum members are referenced directly (ApplicationProtocol.HTTPS,
    # FargatePlatformVersion.VERSION1_4) instead of constructing the enum
    # from its member-name string, which is the idiomatic and type-safe form.
    EcsService = ecs_patterns.ApplicationLoadBalancedFargateService(
        self,
        "EcsService",
        cluster=cluster,
        desired_count=props['ecs_container_desired_count'],
        task_definition=WordpressTask,
        enable_ecs_managed_tags=True,
        public_load_balancer=True,
        domain_name=props['domain_name'],
        domain_zone=route53.HostedZone.from_hosted_zone_attributes(
            self,
            "hostedZone",
            hosted_zone_id=props['domain_zone'],
            zone_name=props['zone_name']),
        listener_port=443,
        redirect_http=True,
        protocol=elasticloadbalancingv2.ApplicationProtocol.HTTPS,
        target_protocol=elasticloadbalancingv2.ApplicationProtocol.HTTP,
        platform_version=ecs.FargatePlatformVersion.VERSION1_4,  #Required for EFS
        security_groups=[
            ec2.SecurityGroup.from_security_group_id(
                self,
                "EcsToRdsSeurityGroup",
                security_group_id=props["EcsToRdsSeurityGroup"].
                security_group_id)
        ],
    )

    #https://gist.github.com/phillippbertram/ee312b09c3982d76b9799653ed6d6201
    #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/Connections.html#aws_cdk.aws_ec2.Connections
    EcsService.service.connections.allow_to(
        props['file_system'], ec2.Port.tcp(2049))  #Open hole to ECS in EFS SG

    #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_elasticloadbalancingv2/ApplicationTargetGroup.html#aws_cdk.aws_elasticloadbalancingv2.ApplicationTargetGroup.set_attribute
    EcsService.target_group.set_attribute(
        key="load_balancing.algorithm.type",
        value="least_outstanding_requests")
    EcsService.target_group.set_attribute(
        key="deregistration_delay.timeout_seconds", value="30")
    EcsService.target_group.configure_health_check(
        healthy_threshold_count=5,  #2-10
        timeout=core.Duration.seconds(29),
    )

    #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateService.html#aws_cdk.aws_ecs.FargateService.auto_scale_task_count
    ECSAutoScaler = EcsService.service.auto_scale_task_count(
        max_capacity=props['ecs_container_max_count'],
        min_capacity=props['ecs_container_min_count'])

    #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/ScalableTaskCount.html#aws_cdk.aws_ecs.ScalableTaskCount
    ECSAutoScaler.scale_on_cpu_utilization(
        "cpuScale",
        target_utilization_percent=80,
        scale_out_cooldown=core.Duration.seconds(30),
        scale_in_cooldown=core.Duration.seconds(60))
    ECSAutoScaler.scale_on_memory_utilization(
        "memScale",
        target_utilization_percent=80,
        scale_out_cooldown=core.Duration.seconds(30),
        scale_in_cooldown=core.Duration.seconds(60))
def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
    """Set up the umccrise AWS Batch pipeline: IAM roles, a spot/on-demand
    compute environment with a custom launch template, a job queue and job
    definition, and a job-submission Lambda.

    Args:
        scope: Parent CDK construct.
        id: Logical id of this construct within *scope*.
        props: Configuration dict; expected keys include ``ro_buckets``,
            ``rw_buckets``, ``compute_env_type``, ``compute_env_ami``,
            ``container_image``, ``refdata_bucket``, ``input_bucket``,
            ``result_bucket`` and ``image_configurable``.
    """
    super().__init__(scope, id, **kwargs)
    dirname = os.path.dirname(__file__)

    ecr_repo = ecr.Repository.from_repository_name(
        self,
        'UmccriseEcrRepo',
        repository_name='umccrise'
    )

    ################################################################################
    # Set up permissions
    ro_buckets = set()
    for bucket in props['ro_buckets']:
        tmp_bucket = s3.Bucket.from_bucket_name(
            self,
            bucket,
            bucket_name=bucket
        )
        ro_buckets.add(tmp_bucket)

    rw_buckets = set()
    for bucket in props['rw_buckets']:
        tmp_bucket = s3.Bucket.from_bucket_name(
            self,
            bucket,
            bucket_name=bucket
        )
        rw_buckets.add(tmp_bucket)

    batch_service_role = iam.Role(
        self,
        'BatchServiceRole',
        assumed_by=iam.ServicePrincipal('batch.amazonaws.com'),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AWSBatchServiceRole')
        ]
    )

    spotfleet_role = iam.Role(
        self,
        'AmazonEC2SpotFleetRole',
        assumed_by=iam.ServicePrincipal('spotfleet.amazonaws.com'),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AmazonEC2SpotFleetTaggingRole')
        ]
    )

    # Create role for Batch instances
    batch_instance_role = iam.Role(
        self,
        'BatchInstanceRole',
        role_name='UmccriseBatchInstanceRole',
        assumed_by=iam.CompositePrincipal(
            iam.ServicePrincipal('ec2.amazonaws.com'),
            iam.ServicePrincipal('ecs.amazonaws.com')
        ),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AmazonEC2RoleforSSM'),
            iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AmazonEC2ContainerServiceforEC2Role')
        ]
    )
    # Instances need to manage EBS volumes (attach/create/tag) at runtime.
    batch_instance_role.add_to_policy(
        iam.PolicyStatement(
            actions=[
                "ec2:Describe*",
                "ec2:AttachVolume",
                "ec2:CreateVolume",
                "ec2:CreateTags",
                "ec2:ModifyInstanceAttribute"
            ],
            resources=["*"]
        )
    )
    batch_instance_role.add_to_policy(
        iam.PolicyStatement(
            actions=[
                "ecs:ListClusters"
            ],
            resources=["*"]
        )
    )
    for bucket in ro_buckets:
        bucket.grant_read(batch_instance_role)
    for bucket in rw_buckets:
        # restrict writes to keys matching */umccrised/* in the rw buckets
        bucket.grant_read_write(batch_instance_role, '*/umccrised/*')

    # Turn the instance role into an Instance Profile
    batch_instance_profile = iam.CfnInstanceProfile(
        self,
        'BatchInstanceProfile',
        instance_profile_name='UmccriseBatchInstanceProfile',
        roles=[batch_instance_role.role_name]
    )

    ################################################################################
    # Network
    # (Import common infrastructure (maintained via TerraForm)

    # VPC
    vpc = ec2.Vpc.from_lookup(
        self,
        'UmccrMainVpc',
        tags={'Name': 'main-vpc', 'Stack': 'networking'}
    )

    batch_security_group = ec2.SecurityGroup(
        self,
        "BatchSecurityGroup",
        vpc=vpc,
        description="Allow all outbound, no inbound traffic"
    )
    ################################################################################
    # Setup Batch compute resources

    # Configure BlockDevice to expand instance disk space (if needed?)
    block_device_mappings = [
        {
            'deviceName': '/dev/xvdf',
            'ebs': {
                'deleteOnTermination': True,
                'encrypted': True,
                'volumeSize': 2048,
                'volumeType': 'gp2'
            }
        }
    ]

    # Set up custom user data to configure the Batch instances
    umccrise_wrapper_asset = assets.Asset(
        self,
        'UmccriseWrapperAsset',
        path=os.path.join(dirname, '..', 'assets', "umccrise-wrapper.sh")
    )
    umccrise_wrapper_asset.grant_read(batch_instance_role)

    user_data_asset = assets.Asset(
        self,
        'UserDataAsset',
        path=os.path.join(dirname, '..', 'assets', "batch-user-data.sh")
    )
    user_data_asset.grant_read(batch_instance_role)

    # Download the user-data script from S3 at boot, then execute it with the
    # S3 URL of the wrapper script as its argument.
    user_data = ec2.UserData.for_linux()
    local_path = user_data.add_s3_download_command(
        bucket=user_data_asset.bucket,
        bucket_key=user_data_asset.s3_object_key
    )
    user_data.add_execute_file_command(
        file_path=local_path,
        arguments=f"s3://{umccrise_wrapper_asset.bucket.bucket_name}/{umccrise_wrapper_asset.s3_object_key}"
    )

    # Generate user data wrapper to comply with LaunchTemplate required MIME multi-part archive format for user data
    mime_wrapper = ec2.UserData.custom('MIME-Version: 1.0')
    mime_wrapper.add_commands('Content-Type: multipart/mixed; boundary="==MYBOUNDARY=="')
    mime_wrapper.add_commands('')
    mime_wrapper.add_commands('--==MYBOUNDARY==')
    mime_wrapper.add_commands('Content-Type: text/x-shellscript; charset="us-ascii"')
    mime_wrapper.add_commands('')
    # install AWS CLI, as it's unexpectedly missing from the AWS Linux 2 AMI...
    mime_wrapper.add_commands('yum -y install unzip')
    mime_wrapper.add_commands('cd /opt')
    mime_wrapper.add_commands('curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"')
    mime_wrapper.add_commands('unzip awscliv2.zip')
    mime_wrapper.add_commands('sudo ./aws/install --bin-dir /usr/bin')
    # insert our actual user data payload
    mime_wrapper.add_commands(user_data.render())
    mime_wrapper.add_commands('--==MYBOUNDARY==--')

    launch_template = ec2.CfnLaunchTemplate(
        self,
        'UmccriseBatchComputeLaunchTemplate',
        launch_template_name='UmccriseBatchComputeLaunchTemplate',
        launch_template_data={
            'userData': core.Fn.base64(mime_wrapper.render()),
            'blockDeviceMappings': block_device_mappings
        }
    )

    launch_template_spec = batch.LaunchTemplateSpecification(
        launch_template_name=launch_template.launch_template_name,
        version='$Latest'
    )

    # SPOT or ON_DEMAND is selected by configuration; capacity scales 0-320 vCPUs.
    my_compute_res = batch.ComputeResources(
        type=(batch.ComputeResourceType.SPOT if props['compute_env_type'].lower() == 'spot' else batch.ComputeResourceType.ON_DEMAND),
        allocation_strategy=batch.AllocationStrategy.BEST_FIT_PROGRESSIVE,
        desiredv_cpus=0,
        maxv_cpus=320,
        minv_cpus=0,
        image=ec2.MachineImage.generic_linux(ami_map={'ap-southeast-2': props['compute_env_ami']}),
        launch_template=launch_template_spec,
        spot_fleet_role=spotfleet_role,
        instance_role=batch_instance_profile.instance_profile_name,
        vpc=vpc,
        vpc_subnets=ec2.SubnetSelection(
            subnet_type=ec2.SubnetType.PRIVATE,
            # availability_zones=["ap-southeast-2a"]
        ),
        security_groups=[batch_security_group]
        # compute_resources_tags=core.Tag('Creator', 'Batch')
    )
    # XXX: How to add more than one tag above??
    # https://github.com/aws/aws-cdk/issues/7350
    # core.Tag.add(my_compute_res, 'Foo', 'Bar')

    my_compute_env = batch.ComputeEnvironment(
        self,
        'UmccriseBatchComputeEnv',
        compute_environment_name="cdk-umccr_ise-batch-compute-env",
        service_role=batch_service_role,
        compute_resources=my_compute_res
    )
    # child = my_compute_env.node.default_child
    # child_comp_res = child.compute_resources
    # child_comp_res.tags = "{'Foo': 'Bar'}"

    job_queue = batch.JobQueue(
        self,
        'UmccriseJobQueue',
        job_queue_name='cdk-umccrise_job_queue',
        compute_environments=[
            batch.JobQueueComputeEnvironment(
                compute_environment=my_compute_env,
                order=1
            )
        ],
        priority=10
    )

    # Container runs the wrapper baked onto the host via the /opt/container
    # host volume; "Ref::vcpus" is substituted from job parameters at submit time.
    job_container = batch.JobDefinitionContainer(
        image=ecs.ContainerImage.from_registry(name=props['container_image']),
        vcpus=32,
        memory_limit_mib=100000,
        command=[
            "/opt/container/umccrise-wrapper.sh",
            "Ref::vcpus"
        ],
        mount_points=[
            ecs.MountPoint(
                container_path='/work',
                read_only=False,
                source_volume='work'
            ),
            ecs.MountPoint(
                container_path='/opt/container',
                read_only=True,
                source_volume='container'
            )
        ],
        volumes=[
            ecs.Volume(
                name='container',
                host=ecs.Host(
                    source_path='/opt/container'
                )
            ),
            ecs.Volume(
                name='work',
                host=ecs.Host(
                    source_path='/mnt'
                )
            )
        ],
        privileged=True
    )

    job_definition = batch.JobDefinition(
        self,
        'UmccriseJobDefinition',
        job_definition_name='cdk-umccrise-job-definition',
        parameters={'vcpus': '1'},
        container=job_container,
        timeout=core.Duration.hours(5)
    )

    ################################################################################
    # Set up job submission Lambda

    lambda_role = iam.Role(
        self,
        'UmccriseLambdaRole',
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AWSLambdaBasicExecutionRole'),
            iam.ManagedPolicy.from_aws_managed_policy_name('AWSBatchFullAccess')  # TODO: restrict!
        ]
    )

    for bucket in ro_buckets:
        bucket.grant_read(lambda_role)
    for bucket in rw_buckets:
        bucket.grant_read(lambda_role)
    ecr_repo.grant(lambda_role, 'ecr:ListImages')

    # TODO: support dev/prod split, i.e. image being configurable on dev, but fixed on prod
    # may need a default JobDefinition to be set up
    lmbda.Function(
        self,
        'UmccriseLambda',
        function_name='umccrise_batch_lambda',
        handler='umccrise.lambda_handler',
        runtime=lmbda.Runtime.PYTHON_3_7,
        code=lmbda.Code.from_asset('lambdas/umccrise'),
        environment={
            'JOBNAME_PREFIX': "UMCCRISE_",
            'JOBQUEUE': job_queue.job_queue_name,
            'UMCCRISE_MEM': '100000',
            'UMCCRISE_VCPUS': '32',
            'JOBDEF': job_definition.job_definition_name,
            'REFDATA_BUCKET': props['refdata_bucket'],
            'INPUT_BUCKET': props['input_bucket'],
            'RESULT_BUCKET': props['result_bucket'],
            'IMAGE_CONFIGURABLE': props['image_configurable']
        },
        role=lambda_role
    )
def __init__(self, scope: core.Construct, config: dict, id: str, **kwargs) -> None:
    """Stand up a Commvault MediaAgent on Fargate behind an internal ALB,
    with an S3 demo bucket and six EFS-backed persistent mounts.

    Args:
        scope: Parent CDK construct.
        config: Deployment settings; may contain ``vpc_id``, and must
            contain ``region`` and ``account`` for the bucket name.
        id: Logical id of this stack.
    """
    super().__init__(scope, id, **kwargs)

    # Reuse an existing VPC when one is configured; otherwise let the
    # cluster construct create its own.
    vpc = (ec2.Vpc.from_lookup(self, "ECS-VPC", vpc_id=config["vpc_id"])
           if 'vpc_id' in config else None)

    cluster = ecs.Cluster(self,
                          "commvault",
                          cluster_name="commvault-cs",
                          container_insights=True,
                          vpc=vpc)

    # Demo bucket the MediaAgent task reads from and writes to.
    bucket = s3.Bucket(self,
                       "commvault-bucket",
                       bucket_name="commvault-demo-bucket-{}-{}".format(
                           config["region"], config["account"]))

    # Hosted zone lookup lets the ALB generate its certificate.
    domain_zone = route53.HostedZone.from_lookup(
        self, "walkerzone", domain_name="code.awalker.dev")

    # Encrypted EFS file system shared by every Commvault mount below.
    commvault_file_system = efs.FileSystem(self,
                                           "comvault-efs",
                                           vpc=cluster.vpc,
                                           file_system_name="commvault-efs",
                                           encrypted=True)

    # Task definition hosting the Commvault MediaAgent container.
    ecs_task = ecs.FargateTaskDefinition(self, "commvault-task")
    media_agent = ecs_task.add_container(
        "commvault-container",
        image=ecs.ContainerImage.from_registry(
            "store/commvaultrepo/mediaagent:SP7"),
        essential=True,
        command=[
            "-csclientname", "filesys", "-cshost", "-mountpath",
            '"/opt/libraryPath"', "-cvdport", "8600", "-clienthost",
            "-clientname", "dockermediaagent"
        ],
        logging=ecs.LogDrivers.aws_logs(stream_prefix="commvault"))
    media_agent.add_port_mappings(
        ecs.PortMapping(container_port=80,
                        host_port=80,
                        protocol=ecs.Protocol.TCP))
    ecs_task.add_to_task_role_policy(statement=iam.PolicyStatement(
        actions=["efs:*"], resources=['*'], effect=iam.Effect.ALLOW))

    # Internal HTTPS load-balanced Fargate service (platform 1.4 for EFS).
    ecs_service = ecs_patterns.ApplicationLoadBalancedFargateService(
        self,
        "commvault-service",
        assign_public_ip=False,
        cluster=cluster,
        task_definition=ecs_task,
        protocol=elbv2.Protocol.HTTPS,
        redirect_http=True,
        domain_name="commvault.code.awalker.dev",
        domain_zone=domain_zone,
        platform_version=ecs.FargatePlatformVersion.VERSION1_4,
        public_load_balancer=False)

    # Grant the task read/write on the demo bucket.
    bucket.grant_read_write(ecs_service.task_definition.task_role)

    # Each tuple is (volume name, access-point construct id, EFS path,
    # container mount path) — one per `docker -v` flag of the original
    # docker invocation (logs, registry, library, index cache, job
    # results, certificates).
    efs_mounts = (
        ("CommvaultLogs", "CommvaultLog-access-point",
         "/CommvaultLogs", "/var/log/commvault/Log_Files"),
        ("CommVaultRegistry", "CommVaultRegistrys-access-point",
         "/CommVaultRegistry", "/etc/CommVaultRegistry"),
        ("libraryPath", "libraryPath-access-point",
         "/libraryPath", "/opt/libraryPath"),
        ("IndexCache", "IndexCache-access-point",
         "/IndexCache", "/opt/IndexCache"),
        ("jobResults", "jobResults-access-point",
         "/jobResults", "/opt/jobResults"),
        ("certificates", "certificates-access-point",
         "/certificates", "/opt/commvault/Base/certificates"),
    )
    for volume_name, ap_id, efs_path, mount_path in efs_mounts:
        access_point = efs.AccessPoint(self,
                                       ap_id,
                                       path=efs_path,
                                       file_system=commvault_file_system)
        ecs_task.add_volume(
            name=volume_name,
            efs_volume_configuration=ecs.EfsVolumeConfiguration(
                file_system_id=commvault_file_system.file_system_id,
                transit_encryption="ENABLED",
                authorization_config=ecs.AuthorizationConfig(
                    access_point_id=access_point.access_point_id)))
        ecs_task.default_container.add_mount_points(
            ecs.MountPoint(container_path=mount_path,
                           source_volume=volume_name,
                           read_only=False))
"type": "nfs", "device": device_set, "o": efs_to_connect #"o": "addr=fs-XXXXXX.efs.us-east-1.amazonaws.com,nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport" } docker_vol_config = ecs.DockerVolumeConfiguration(driver='local', scope=ecs.Scope.TASK, driver_opts=driveropts, labels=None) docker_volume = ecs.Volume(name='docker_vol', docker_volume_configuration=docker_vol_config) efs_mount = ecs.MountPoint(container_path='/efs', read_only=True, source_volume='docker_vol') cluster = ecs.Cluster(stack, "wes-ecs", vpc=vpc, cluster_name='Proj-VONC_VISTA') print('cluster sec group ', str(type(cluster.autoscaling_group))) #cluster.add_capacity("DefaultAutoScalingGroup", # instance_type=ec2.InstanceType("c5.xlarge"), key_name='Vonc-Prod-Key',max_capacity=4,machine_image=amitouse, # desired_capacity=2,min_capacity=2) print('connections ', str(cluster.connections)) port = ec2.Port(protocol=ec2.Protocol.TCP, string_representation='inbound to container instances', from_port=22,