def define_port_mappings(self) -> list:
    """
    Define the list of port mappings to use for either AWS VPC deployments or else (bridge etc).
    Not in use atm as AWS VPC is made mandatory

    :return: list of troposphere.ecs.PortMapping
    """
    # NOTE: when a container definition exists, this fetches its live
    # PortMappings list, so the appends below mutate the container
    # definition in place. getattr() has no default here — presumably the
    # container definition always carries PortMappings; TODO confirm.
    service_port_mappings = (getattr(self.container_definition, "PortMappings")
                             if self.container_definition else [])
    # ingress_mappings: {protocol: {target_port: [published_ports...]}}
    for protocol, mappings in self.ingress_mappings.items():
        for target_port, published_ports in mappings.items():
            if published_ports:
                for port in published_ports:
                    # On Fargate the HostPort must not differ from the
                    # ContainerPort, hence the If() dropping it to NoValue.
                    service_port_mappings.append(
                        PortMapping(
                            ContainerPort=target_port,
                            HostPort=If(USE_FARGATE_CON_T, NoValue, port),
                            Protocol=protocol.lower(),
                        ))
            else:
                # No published ports requested: expose the container port only.
                service_port_mappings.append(
                    PortMapping(
                        ContainerPort=target_port,
                        HostPort=NoValue,
                        Protocol=protocol.lower(),
                    ))
    # Merge in any ports declared via the ``expose`` syntax.
    self.handle_expose_ports(service_port_mappings)
    return service_port_mappings
def define_xray_container():
    """
    Build the ContainerDefinition for the AWS X-Ray daemon sidecar that runs
    alongside the application container.

    :return: troposphere.ecs.ContainerDefinition for the X-Ray daemon
    """
    # Ship the daemon's logs to the task's log group under its own prefix.
    xray_logging = LogConfiguration(
        LogDriver="awslogs",
        Options={
            "awslogs-group": Ref(LOG_GROUP_T),
            "awslogs-region": Ref("AWS::Region"),
            "awslogs-stream-prefix": "xray-daemon",
        },
    )
    # The daemon listens on UDP/2000 and is intentionally non-essential so a
    # daemon failure does not bring the whole task down.
    return ContainerDefinition(
        Name="xray-daemon",
        Image=Ref(XRAY_IMAGE),
        Essential=False,
        Cpu=32,
        Memory=256,
        MemoryReservation=256,
        PortMappings=[
            PortMapping(ContainerPort=2000, Protocol="UDP", HostPort=2000)
        ],
        LogConfiguration=xray_logging,
    )
def add_ecs_task_definition(self,
                            container_definitions: List[Dict],
                            cpu: str = "256",
                            memory: str = "512",
                            network_mode: str = "awsvpc",
                            port: int = 80):
    """
    Add a Fargate ECS TaskDefinition (and its executor role) to the template.

    :param container_definitions: dicts each holding ``name``, ``image`` and
        ``port_bool`` (whether to expose ``port`` on that container).
    :param cpu: task-level CPU units as a string (Fargate sizing).
    :param memory: task-level memory in MiB as a string.
    :param network_mode: ECS network mode; Fargate requires ``awsvpc``.
    :param port: container port exposed when ``port_bool`` is True.
    :raises KeyError: when a required key is missing from a definition.
    """
    container_definitions_list = []
    self.create_ecs_executor_role()
    for cd in container_definitions:
        try:
            definition_kwargs = {
                "Name": cd["name"],
                "Image": cd["image"],
                "Essential": True,
            }
            # Only attach a PortMapping when explicitly requested.
            if cd["port_bool"] is True:
                definition_kwargs["PortMappings"] = [
                    PortMapping(ContainerPort=port)
                ]
        except KeyError as exc:
            # Re-raise with context about which key was missing.
            raise KeyError(
                f"Missing Key in container definition(s) - {exc}") from exc
        container_definitions_list.append(
            ContainerDefinition(**definition_kwargs))
    task_definition = TaskDefinition(
        "TaskDefinition",
        RequiresCompatibilities=["FARGATE"],
        Cpu=cpu,
        Memory=memory,
        NetworkMode=network_mode,
        ContainerDefinitions=container_definitions_list,
        # Ref() only needs the logical resource name; the previous code built
        # a throwaway Role("ecsExecutorRole") object just to Ref it, which
        # rendered to the exact same {"Ref": "ecsExecutorRole"}.
        ExecutionRoleArn=Ref("ecsExecutorRole"))
    self.template.add_resource(task_definition)
def ecs_redirect(self, cluster, url):
    """
    Register a bridge-mode task definition plus an ECS service that runs a
    tiny HTTP redirect container pointing every request at ``url``.

    :param cluster: the ECS cluster to run the redirect service on
    :param url: the destination URL for the redirect
    """
    self._ecs_redirect = True
    # The single container that performs the redirect.
    redirect_container = ContainerDefinition(
        Name="redirect",
        Cpu=1,
        Environment=[Environment(Name="REDIRECT", Value=url)],
        Essential=True,
        Hostname=Sub("${AWS::StackName}-redirect"),
        Image="cusspvz/redirect:0.0.2",
        Memory=512,
        MemoryReservation=128,
        PortMappings=[PortMapping(ContainerPort=80, Protocol="tcp")],
    )
    self.template.add_resource(
        TaskDefinition(
            "RedirectTaskDef",
            Volumes=[],
            Family=Sub("${AWS::StackName}-redirect"),
            NetworkMode="bridge",
            ContainerDefinitions=[redirect_container],
        ))
    # Keep one healthy copy behind the stack's default target group.
    self.template.add_resource(
        Service(
            "RedirectService",
            TaskDefinition=Ref("RedirectTaskDef"),
            Cluster=cluster,
            DesiredCount=1,
            DeploymentConfiguration=DeploymentConfiguration(
                MaximumPercent=200,
                MinimumHealthyPercent=100),
            LoadBalancers=[
                EcsLoadBalancer(
                    ContainerName="redirect",
                    ContainerPort=80,
                    TargetGroupArn=Ref("DefaultTargetGroup"))
            ]))
def handle_expose_ports(self, aws_vpc_mappings):
    """
    Import the expose ports to AWS VPC Mappings

    :param list[troposphere.ecs.PortMapping] aws_vpc_mappings: List of ECS
        Port Mappings defined from ports[]
    :raises ValueError: when a string expose port does not match
        ``<port>[/tcp|/udp]``
    :raises TypeError: when an expose port is neither str nor int
    """
    # Protocol suffix is now optional: a bare "8080" is accepted and defaults
    # to tcp, mirroring the int form below. The previous pattern used a
    # mandatory lookahead (?=/(udp|tcp)), which rejected protocol-less
    # strings and made the `or "tcp"` fallback unreachable. The pattern is
    # also anchored at the end so trailing junk (e.g. "80/udpx") is rejected.
    expose_port_re = re.compile(
        r"^(?P<target>\d{1,5})(?:/(?P<protocol>udp|tcp))?$")
    for expose_port in self.expose_ports:
        if isinstance(expose_port, str):
            parts = expose_port_re.match(expose_port)
            if not parts:
                raise ValueError(
                    "Expose port value is invalid. Must match",
                    expose_port_re.pattern,
                )
            port = int(parts.group("target"))
            protocol = parts.group("protocol") or "tcp"
        elif isinstance(expose_port, int):
            port = expose_port
            protocol = "tcp"
        else:
            raise TypeError(expose_port, "is", type(expose_port),
                            "expected one of", (str, int))
        # In awsvpc mode container ports must be unique across the task, so
        # skip any port already present in the mappings.
        if port not in [p.ContainerPort for p in aws_vpc_mappings]:
            aws_vpc_mappings.append(
                PortMapping(
                    HostPort=NoValue,
                    ContainerPort=port,
                    Protocol=protocol.lower(),
                ))
        else:
            LOG.debug(
                f"{self.name} - Port {port} was already defined as ``ports``."
                " In awsvpc mode the Container Ports must be unique."
                f" Skipping {self.name}.expose.{expose_port}")
Name="WebWorker", # 1024 is full CPU Cpu=8, Memory=2048, Essential=True, Image=Join("", [ Ref(AWS_ACCOUNT_ID), ".dkr.ecr.", Ref(AWS_REGION), ".amazonaws.com/", Ref(repository), ":", app_revision, ]), PortMappings=[PortMapping( ContainerPort=10, HostPort=8000, )], Environment=[ Environment( Name="AWS_STORAGE_BUCKET_NAME", Value='blah', ), Environment(Name="CDN_DOMAIN_NAME", Value="DomainName") ], ) ], ) print(template.to_yaml())
from stack.cluster.mongo import mongo_instance, mongo_user, mongo_pass bigid_task_definition = TaskDefinition( "BigIdTask", template=template, ContainerDefinitions=[ ContainerDefinition( Name="bigid-web", Memory="1024", Essential=True, Image=Join("", [ repo_id, "/bigid/bigid-web", ]), PortMappings=[PortMapping(ContainerPort="3000", HostPort="3000")], Links=["bigid-orch"], ExtraHosts=[ HostEntry(Hostname="bigid-mongo", IpAddress=GetAtt(mongo_instance, "PrivateIp")) ], Environment=[ Environment( Name="BIGID_MONGO_USER", Value=mongo_user, ), Environment( Name="BIGID_MONGO_PWD", Value=mongo_pass, ), Environment(
ghost_task_definition = t.add_resource( TaskDefinition( 'GhostTaskDefinition', RequiresCompatibilities=['FARGATE'], Cpu='512', Memory='1GB', NetworkMode='awsvpc', TaskRoleArn=ImportValue(Sub("${DependencyStackName}-TaskRoleArn")), ExecutionRoleArn=ImportValue( Sub("${DependencyStackName}-TaskExecutionRoleArn")), ContainerDefinitions=[ ContainerDefinition( Name='ghost', Image=Ref(ghost_image), Essential=True, PortMappings=[PortMapping(ContainerPort=2368)], Environment=[ Environment( Name='url', Value=ImportValue( Sub("${DependencyStackName}-ALBURL")), ), Environment(Name='database__client', Value='mysql2'), Environment( Name='database__connection__host', Value=ImportValue( Sub("${DependencyStackName}-GhostDBHost")), ), Environment(Name='database__connection__user', Value='ghost'), Environment(Name='database__connection__database',
def build_template(sierrafile):
    """
    Build the full CloudFormation template for a sierrafile: network (VPC,
    subnets, NLB), ECS cluster (hosts, autoscaling), one task definition /
    target group / listener / service per declared service, and an optional
    CodePipeline + webhook per service when its pipeline is enabled.

    :param sierrafile: parsed configuration exposing ``extra_params``,
        ``services`` and ``env_vars``
    :return: the assembled troposphere Template
    """
    template = Template()
    template.add_version('2010-09-09')
    template.add_metadata(build_interface(sierrafile.extra_params))

    parameters = AttrDict(

        # Network Parameters

        vpc_cidr=template.add_parameter(Parameter(
            'VpcCidr',
            Type='String',
            Default='192.172.0.0/16',
        )),
        subnet1_cidr=template.add_parameter(Parameter(
            'Subnet1Cidr',
            Type='String',
            Default='192.172.1.0/24',
        )),
        subnet2_cidr=template.add_parameter(Parameter(
            'Subnet2Cidr',
            Type='String',
            Default='192.172.2.0/24',
        )),

        # ECS Parameters

        cluster_size=template.add_parameter(Parameter(
            'ClusterSize',
            Type='Number',
            Default=2,
        )),
        instance_type=template.add_parameter(Parameter(
            'InstanceType',
            Type='String',
            Default='t2.medium'
        )),
        key_name=template.add_parameter(Parameter(
            'KeyName',
            Type='AWS::EC2::KeyPair::KeyName',
        )),
        image_id=template.add_parameter(Parameter(
            'ImageId',
            Type='AWS::SSM::Parameter::Value<AWS::EC2::Image::Id>',
            Default=(
                '/aws/service/ecs/optimized-ami'
                '/amazon-linux/recommended/image_id'
            ),
            Description=(
                'An SSM parameter that resolves to a valid AMI ID.'
                ' This is the AMI that will be used to create ECS hosts.'
                ' The default is the current recommended ECS-optimized AMI.'
            )
        )),

        # Other Parameters

        github_token=template.add_parameter(Parameter(
            'GitHubToken',
            Type='String',
            NoEcho=True,
        )),
    )

    # Environment Variable Parameters
    # NOTE(review): env_var_name is unpacked but unused here — presumably the
    # mapping is consumed elsewhere; the parameter logical ID is the key.
    for env_var_param, env_var_name in sierrafile.extra_params:
        template.add_parameter(Parameter(
            env_var_param,
            Type='String',
            NoEcho=True,
        ))

    # Resource Declarations

    # # Network

    network_vpc = template.add_resource(VPC(
        'NetworkVpc',
        CidrBlock=Ref(parameters.vpc_cidr),
        Tags=Tags(Name=Ref('AWS::StackName')),
    ))

    network_ig = template.add_resource(InternetGateway(
        'NetworkInternetGateway',
        Tags=Tags(Name=Ref('AWS::StackName')),
    ))

    vpc_attach = template.add_resource(VPCGatewayAttachment(
        'NetworkInternetGatewayAttachment',
        InternetGatewayId=Ref(network_ig),
        VpcId=Ref(network_vpc),
    ))

    route_table = template.add_resource(RouteTable(
        'NetworkRouteTable',
        VpcId=Ref(network_vpc),
        Tags=Tags(Name=Ref('AWS::StackName')),
    ))

    # Default route can only be created after the IGW is attached.
    template.add_resource(Route(
        'NetworkDefaultRoute',
        DependsOn=[vpc_attach.title],
        RouteTableId=Ref(route_table),
        DestinationCidrBlock='0.0.0.0/0',
        GatewayId=Ref(network_ig),
    ))

    subnet1 = template.add_resource(Subnet(
        'NetworkSubnet1',
        VpcId=Ref(network_vpc),
        AvailabilityZone=Select(0, GetAZs()),
        MapPublicIpOnLaunch=True,
        CidrBlock=Ref(parameters.subnet1_cidr),
        Tags=Tags(Name=Sub('${AWS::StackName} (Public)')),
    ))

    subnet2 = template.add_resource(Subnet(
        'NetworkSubnet2',
        VpcId=Ref(network_vpc),
        AvailabilityZone=Select(1, GetAZs()),
        MapPublicIpOnLaunch=True,
        CidrBlock=Ref(parameters.subnet2_cidr),
        Tags=Tags(Name=Sub('${AWS::StackName} (Public)')),
    ))

    template.add_resource(SubnetRouteTableAssociation(
        'NetworkSubnet1RouteTableAssociation',
        RouteTableId=Ref(route_table),
        SubnetId=Ref(subnet1),
    ))

    template.add_resource(SubnetRouteTableAssociation(
        'NetworkSubnet2RouteTableAssociation',
        RouteTableId=Ref(route_table),
        SubnetId=Ref(subnet2),
    ))

    elb = template.add_resource(LoadBalancer(
        ELB_NAME,
        Name=Sub('${AWS::StackName}-elb'),
        Type='network',
        Subnets=[Ref(subnet1), Ref(subnet2)],
    ))

    # # Cluster

    ecs_host_role = template.add_resource(Role(
        'EcsHostRole',
        AssumeRolePolicyDocument=PolicyDocument(
            Statement=[Statement(
                Effect=Allow,
                Principal=Principal('Service', 'ec2.amazonaws.com'),
                Action=[awacs.sts.AssumeRole]
            )],
        ),
        ManagedPolicyArns=[
            'arn:aws:iam::aws:policy/'
            'service-role/AmazonEC2ContainerServiceforEC2Role'
        ]
    ))

    ecs_host_profile = template.add_resource(InstanceProfile(
        'EcsHostInstanceProfile',
        Roles=[Ref(ecs_host_role)]
    ))

    # NOTE(review): ingress is wide open (all protocols from 0.0.0.0/0).
    ecs_host_sg = template.add_resource(SecurityGroup(
        'EcsHostSecurityGroup',
        GroupDescription=Sub('${AWS::StackName}-hosts'),
        VpcId=Ref(network_vpc),
        SecurityGroupIngress=[SecurityGroupRule(
            CidrIp='0.0.0.0/0',
            IpProtocol='-1'
        )]
    ))

    cluster = template.add_resource(Cluster(
        'EcsCluster',
        ClusterName=Ref('AWS::StackName')
    ))

    # Logical names are needed inside the UserData/Metadata strings below,
    # so they are bound before the resources are declared.
    autoscaling_name = 'EcsHostAutoScalingGroup'

    launch_conf_name = 'EcsHostLaunchConfiguration'
    launch_conf = template.add_resource(LaunchConfiguration(
        launch_conf_name,
        ImageId=Ref(parameters.image_id),
        InstanceType=Ref(parameters.instance_type),
        IamInstanceProfile=Ref(ecs_host_profile),
        KeyName=Ref(parameters.key_name),
        SecurityGroups=[Ref(ecs_host_sg)],
        # cfn-init applies the Metadata below, then cfn-signal reports
        # success/failure to the autoscaling group's CreationPolicy.
        UserData=Base64(Sub(
            '#!/bin/bash\n'
            'yum install -y aws-cfn-bootstrap\n'
            '/opt/aws/bin/cfn-init -v'
            ' --region ${AWS::Region}'
            ' --stack ${AWS::StackName}'
            f' --resource {launch_conf_name}\n'
            '/opt/aws/bin/cfn-signal -e $?'
            ' --region ${AWS::Region}'
            ' --stack ${AWS::StackName}'
            f' --resource {autoscaling_name}\n'
        )),
        Metadata={
            'AWS::CloudFormation::Init': {
                'config': {
                    'commands': {
                        '01_add_instance_to_cluster': {
                            'command': Sub(
                                f'echo ECS_CLUSTER=${{{cluster.title}}}'
                                f' > /etc/ecs/ecs.config'
                            ),
                        }
                    },
                    'files': {
                        '/etc/cfn/cfn-hup.conf': {
                            'mode': 0o400,
                            'owner': 'root',
                            'group': 'root',
                            'content': Sub(
                                '[main]\n'
                                'stack=${AWS::StackId}\n'
                                'region=${AWS::Region}\n'
                            ),
                        },
                        '/etc/cfn/hooks.d/cfn-auto-reloader.conf': {
                            'content': Sub(
                                '[cfn-auto-reloader-hook]\n'
                                'triggers=post.update\n'
                                'path=Resources.ContainerInstances.Metadata'
                                '.AWS::CloudFormation::Init\n'
                                'action=/opt/aws/bin/cfn-init -v'
                                ' --region ${AWS::Region}'
                                ' --stack ${AWS::StackName}'
                                f' --resource {launch_conf_name}\n'
                            ),
                        },
                    },
                    'services': {
                        'sysvinit': {
                            'cfn-hup': {
                                'enabled': True,
                                'ensureRunning': True,
                                'files': [
                                    '/etc/cfn/cfn-hup.conf',
                                    '/etc/cfn/hooks.d/cfn-auto-reloader.conf'
                                ]
                            }
                        }
                    }
                }
            }
        }
    ))

    autoscaling_group = template.add_resource(AutoScalingGroup(
        autoscaling_name,
        VPCZoneIdentifier=[Ref(subnet1), Ref(subnet2)],
        LaunchConfigurationName=Ref(launch_conf),
        DesiredCapacity=Ref(parameters.cluster_size),
        MinSize=Ref(parameters.cluster_size),
        MaxSize=Ref(parameters.cluster_size),
        Tags=[{
            'Key': 'Name',
            'Value': Sub('${AWS::StackName} - ECS Host'),
            'PropagateAtLaunch': True,
        }],
        CreationPolicy=CreationPolicy(
            ResourceSignal=ResourceSignal(Timeout='PT15M'),
        ),
        UpdatePolicy=UpdatePolicy(
            AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                MinInstancesInService=1,
                MaxBatchSize=1,
                PauseTime='PT5M',
                WaitOnResourceSignals=True,
            ),
        ),
    ))

    # # Services

    task_role = template.add_resource(Role(
        'TaskExecutionRole',
        AssumeRolePolicyDocument=PolicyDocument(
            Statement=[Statement(
                Effect=Allow,
                Principal=Principal('Service', 'ecs-tasks.amazonaws.com'),
                Action=[awacs.sts.AssumeRole],
            )],
        ),
        ManagedPolicyArns=[
            'arn:aws:iam::aws:policy/'
            'service-role/AmazonECSTaskExecutionRolePolicy'
        ],
    ))

    artifact_bucket = template.add_resource(Bucket(
        'ArtifactBucket',
        DeletionPolicy='Retain',
    ))

    codebuild_role = template.add_resource(Role(
        'CodeBuildServiceRole',
        Path='/',
        AssumeRolePolicyDocument=PolicyDocument(
            Version='2012-10-17',
            Statement=[
                Statement(
                    Effect=Allow,
                    Principal=Principal(
                        'Service', 'codebuild.amazonaws.com'
                    ),
                    Action=[
                        awacs.sts.AssumeRole,
                    ],
                ),
            ],
        ),
        Policies=[Policy(
            PolicyName='root',
            PolicyDocument=PolicyDocument(
                Version='2012-10-17',
                Statement=[
                    Statement(
                        Resource=['*'],
                        Effect=Allow,
                        Action=[
                            awacs.ssm.GetParameters,
                        ],
                    ),
                    Statement(
                        Resource=['*'],
                        Effect=Allow,
                        Action=[
                            awacs.s3.GetObject,
                            awacs.s3.PutObject,
                            awacs.s3.GetObjectVersion,
                        ],
                    ),
                    Statement(
                        Resource=['*'],
                        Effect=Allow,
                        Action=[
                            awacs.logs.CreateLogGroup,
                            awacs.logs.CreateLogStream,
                            awacs.logs.PutLogEvents,
                        ],
                    ),
                ],
            ),
        )],
    ))

    codepipeline_role = template.add_resource(Role(
        'CodePipelineServiceRole',
        Path='/',
        AssumeRolePolicyDocument=PolicyDocument(
            Version='2012-10-17',
            Statement=[
                Statement(
                    Effect=Allow,
                    Principal=Principal(
                        'Service', 'codepipeline.amazonaws.com'
                    ),
                    Action=[
                        awacs.sts.AssumeRole,
                    ],
                ),
            ],
        ),
        Policies=[Policy(
            PolicyName='root',
            PolicyDocument=PolicyDocument(
                Version='2012-10-17',
                Statement=[
                    Statement(
                        Resource=[
                            Sub(f'${{{artifact_bucket.title}.Arn}}/*')
                        ],
                        Effect=Allow,
                        Action=[
                            awacs.s3.GetBucketVersioning,
                            awacs.s3.GetObject,
                            awacs.s3.GetObjectVersion,
                            awacs.s3.PutObject,
                        ],
                    ),
                    Statement(
                        Resource=['*'],
                        Effect=Allow,
                        Action=[
                            awacs.ecs.DescribeServices,
                            awacs.ecs.DescribeTaskDefinition,
                            awacs.ecs.DescribeTasks,
                            awacs.ecs.ListTasks,
                            awacs.ecs.RegisterTaskDefinition,
                            awacs.ecs.UpdateService,
                            awacs.codebuild.StartBuild,
                            awacs.codebuild.BatchGetBuilds,
                            awacs.iam.PassRole,
                        ],
                    ),
                ],
            ),
        )],
    ))

    log_group = template.add_resource(LogGroup(
        'LogGroup',
        LogGroupName=Sub('/ecs/${AWS::StackName}'),
    ))

    # A single shared CodeBuild project, created only when at least one
    # service has its pipeline enabled.
    if any(conf.pipeline.enable for conf in sierrafile.services.values()):
        project = template.add_resource(Project(
            'CodeBuildProject',
            Name=Sub('${AWS::StackName}-build'),
            ServiceRole=Ref(codebuild_role),
            Artifacts=Artifacts(Type='CODEPIPELINE'),
            Source=Source(Type='CODEPIPELINE'),
            Environment=Environment(
                ComputeType='BUILD_GENERAL1_SMALL',
                Image='aws/codebuild/docker:17.09.0',
                Type='LINUX_CONTAINER',
            ),
        ))

    # Per-service task definition, NLB target group, listener and service.
    for name, settings in sierrafile.services.items():
        task_definition = template.add_resource(TaskDefinition(
            f'{name}TaskDefinition',
            RequiresCompatibilities=['EC2'],
            Cpu=str(settings.container.cpu),
            Memory=str(settings.container.memory),
            NetworkMode='bridge',
            ExecutionRoleArn=Ref(task_role.title),
            ContainerDefinitions=[
                ContainerDefinition(
                    Name=f'{name}',
                    Image=settings.container.image,
                    Memory=str(settings.container.memory),
                    Essential=True,
                    PortMappings=[
                        PortMapping(
                            ContainerPort=settings.container.port,
                            Protocol='tcp',
                        ),
                    ],
                    # Only forward env vars explicitly whitelisted for
                    # this service.
                    Environment=[
                        troposphere.ecs.Environment(Name=k, Value=v)
                        for k, v in sierrafile.env_vars.items()
                        if k in settings.get('environment', [])
                    ],
                    LogConfiguration=LogConfiguration(
                        LogDriver='awslogs',
                        Options={
                            'awslogs-region': Ref('AWS::Region'),
                            'awslogs-group': Ref(log_group.title),
                            'awslogs-stream-prefix': Ref('AWS::StackName'),
                        },
                    ),
                ),
            ],
        ))

        target_group = template.add_resource(TargetGroup(
            f'{name}TargetGroup',
            Port=settings.container.port,
            Protocol='TCP',
            VpcId=Ref(network_vpc),
            Tags=Tags(Name=Sub(f'${{AWS::StackName}}-{name}')),
        ))

        listener = template.add_resource(Listener(
            f'{name}ElbListener',
            LoadBalancerArn=Ref(elb),
            Port=settings.container.port,
            Protocol='TCP',
            DefaultActions=[
                Action(TargetGroupArn=Ref(target_group), Type='forward')
            ],
        ))

        service = template.add_resource(Service(
            f'{name}Service',
            Cluster=Ref(cluster),
            ServiceName=f'{name}-service',
            DependsOn=[autoscaling_group.title, listener.title],
            DesiredCount=settings.container.count,
            TaskDefinition=Ref(task_definition),
            LaunchType='EC2',
            LoadBalancers=[
                troposphere.ecs.LoadBalancer(
                    ContainerName=f'{name}',
                    ContainerPort=settings.container.port,
                    TargetGroupArn=Ref(target_group),
                ),
            ],
        ))

        # GitHub -> CodeBuild -> ECS deploy pipeline, plus the webhook that
        # triggers it on pushes to the configured branch.
        if settings.pipeline.enable:
            pipeline = template.add_resource(Pipeline(
                f'{name}Pipeline',
                RoleArn=GetAtt(codepipeline_role, 'Arn'),
                ArtifactStore=ArtifactStore(
                    Type='S3',
                    Location=Ref(artifact_bucket),
                ),
                Stages=[
                    Stages(
                        Name='Source',
                        Actions=[Actions(
                            Name='Source',
                            ActionTypeId=ActionTypeId(
                                Category='Source',
                                Owner='ThirdParty',
                                Version='1',
                                Provider='GitHub',
                            ),
                            OutputArtifacts=[
                                OutputArtifacts(Name=f'{name}Source'),
                            ],
                            RunOrder='1',
                            Configuration={
                                'Owner': settings.pipeline.user,
                                'Repo': settings.pipeline.repo,
                                'Branch': settings.pipeline.branch,
                                'OAuthToken': Ref(parameters.github_token),
                            },
                        )],
                    ),
                    Stages(
                        Name='Build',
                        Actions=[Actions(
                            Name='Build',
                            ActionTypeId=ActionTypeId(
                                Category='Build',
                                Owner='AWS',
                                Version='1',
                                Provider='CodeBuild',
                            ),
                            InputArtifacts=[
                                InputArtifacts(Name=f'{name}Source'),
                            ],
                            OutputArtifacts=[
                                OutputArtifacts(Name=f'{name}Build'),
                            ],
                            RunOrder='1',
                            Configuration={
                                'ProjectName': Ref(project),
                            },
                        )],
                    ),
                    Stages(
                        Name='Deploy',
                        Actions=[Actions(
                            Name='Deploy',
                            ActionTypeId=ActionTypeId(
                                Category='Deploy',
                                Owner='AWS',
                                Version='1',
                                Provider='ECS',
                            ),
                            InputArtifacts=[
                                InputArtifacts(Name=f'{name}Build')
                            ],
                            RunOrder='1',
                            Configuration={
                                'ClusterName': Ref(cluster),
                                'ServiceName': Ref(service),
                                'FileName': 'image.json',
                            },
                        )],
                    ),
                ],
            ))

            template.add_resource(Webhook(
                f'{name}CodePipelineWebhook',
                Name=Sub(f'${{AWS::StackName}}-{name}-webhook'),
                Authentication='GITHUB_HMAC',
                AuthenticationConfiguration=AuthenticationConfiguration(
                    SecretToken=Ref(parameters.github_token),
                ),
                Filters=[FilterRule(
                    JsonPath='$.ref',
                    MatchEquals=f'refs/heads/{settings.pipeline.branch}'
                )],
                TargetAction='Source',
                TargetPipeline=Ref(pipeline),
                TargetPipelineVersion=1,
                RegisterWithThirdParty=True,
            ))

    return template
def _add_service(self, service_name, config):
    """
    Add one ECS service (and its task definition, IAM role and outputs) to
    the template. Services with an ``http_interface`` are attached to an ALB;
    others run without a load balancer.

    :param service_name: logical name used as prefix for all resources
    :param config: service configuration dict (keys used here:
        ``memory_reservation``, ``command``, optional ``http_interface``)
    """
    env_config = build_config(
        self.env,
        self.application_name,
        self.env_sample_file_path
    )
    container_definition_arguments = {
        "Environment": [
            Environment(Name=k, Value=v)
            for (k, v) in env_config
        ],
        "Name": service_name + "Container",
        "Image": self.ecr_image_uri + ':' + self.current_version,
        "Essential": 'true',
        "LogConfiguration": self._gen_log_config(service_name),
        "MemoryReservation": int(config['memory_reservation']),
        "Cpu": 0
    }
    # Only HTTP services expose a container port (dynamic host port).
    if 'http_interface' in config:
        container_definition_arguments['PortMappings'] = [
            PortMapping(
                ContainerPort=int(
                    config['http_interface']['container_port']
                )
            )
        ]
    if config['command'] is not None:
        container_definition_arguments['Command'] = [config['command']]
    cd = ContainerDefinition(**container_definition_arguments)
    # Per-service task role assumable by ECS tasks.
    task_role = self.template.add_resource(Role(
        service_name + "Role",
        AssumeRolePolicyDocument=PolicyDocument(
            Statement=[
                Statement(
                    Effect=Allow,
                    Action=[AssumeRole],
                    Principal=Principal("Service", ["ecs-tasks.amazonaws.com"])
                )
            ]
        )
    ))
    td = TaskDefinition(
        service_name + "TaskDefinition",
        Family=service_name + "Family",
        ContainerDefinitions=[cd],
        TaskRoleArn=Ref(task_role)
    )
    self.template.add_resource(td)
    desired_count = self._get_desired_task_count_for_service(service_name)
    deployment_configuration = DeploymentConfiguration(
        MinimumHealthyPercent=100,
        MaximumPercent=200
    )
    if 'http_interface' in config:
        alb, lb, service_listener = self._add_alb(cd, service_name, config)
        # NOTE(review): this branch does not pass deployment_configuration
        # to the Service, unlike the else branch — confirm intentional.
        svc = Service(
            service_name,
            LoadBalancers=[lb],
            Cluster=self.cluster_name,
            Role=Ref(self.ecs_service_role),
            TaskDefinition=Ref(td),
            DesiredCount=desired_count,
            DependsOn=service_listener.title,
            PlacementStrategies=self.PLACEMENT_STRATEGIES
        )
        self.template.add_output(
            Output(
                service_name + 'EcsServiceName',
                Description='The ECS name which needs to be entered',
                Value=GetAtt(svc, 'Name')
            )
        )
        self.template.add_output(
            Output(
                service_name + "URL",
                Description="The URL at which the service is accessible",
                Value=Sub("https://${" + alb.name + ".DNSName}")
            )
        )
        self.template.add_resource(svc)
    else:
        svc = Service(
            service_name,
            Cluster=self.cluster_name,
            TaskDefinition=Ref(td),
            DesiredCount=desired_count,
            DeploymentConfiguration=deployment_configuration,
            PlacementStrategies=self.PLACEMENT_STRATEGIES
        )
        self.template.add_output(
            Output(
                service_name + 'EcsServiceName',
                Description='The ECS name which needs to be entered',
                Value=GetAtt(svc, 'Name')
            )
        )
        self.template.add_resource(svc)
    self._add_service_alarms(svc)
# 1024 is full CPU Cpu=web_worker_cpu, Memory=web_worker_memory, Essential=True, Image=Join("", [ Ref(AWS_ACCOUNT_ID), ".dkr.ecr.", Ref(AWS_REGION), ".amazonaws.com/", Ref(repository), ":", app_revision, ]), PortMappings=[ PortMapping( ContainerPort=web_worker_port, HostPort=web_worker_port, ) ], LogConfiguration=LogConfiguration(LogDriver="awslogs", Options={ 'awslogs-group': Ref(container_log_group), 'awslogs-region': Ref(AWS_REGION), 'awslogs-stream-prefix': Ref(AWS_STACK_NAME), }), Environment=[ Environment(Name=k, Value=v) for k, v in environment_variables ] + [ Environment(Name="PORT", Value=web_worker_port),
def _add_service(self, service_name, config):
    """
    Add one ECS service (EC2 or Fargate launch type) with its task
    definition, task role, optional ALB wiring and security group, plus
    stack outputs and alarms.

    :param service_name: logical name used as prefix for all resources
    :param config: service configuration dict; the presence of a ``fargate``
        key selects the Fargate launch type
    """
    launch_type = self.LAUNCH_TYPE_FARGATE if 'fargate' in config else self.LAUNCH_TYPE_EC2
    env_config = build_config(
        self.env,
        self.application_name,
        self.env_sample_file_path
    )
    container_definition_arguments = {
        "Environment": [
            Environment(Name=k, Value=v)
            for (k, v) in env_config
        ],
        "Name": service_name + "Container",
        "Image": self.ecr_image_uri + ':' + self.current_version,
        "Essential": 'true',
        "LogConfiguration": self._gen_log_config(service_name),
        "MemoryReservation": int(config['memory_reservation']),
        "Cpu": 0
    }
    # Only HTTP services expose a container port.
    if 'http_interface' in config:
        container_definition_arguments['PortMappings'] = [
            PortMapping(
                ContainerPort=int(
                    config['http_interface']['container_port']
                )
            )
        ]
    if config['command'] is not None:
        container_definition_arguments['Command'] = [config['command']]
    cd = ContainerDefinition(**container_definition_arguments)
    task_role = self.template.add_resource(Role(
        service_name + "Role",
        AssumeRolePolicyDocument=PolicyDocument(
            Statement=[
                Statement(
                    Effect=Allow,
                    Action=[AssumeRole],
                    Principal=Principal("Service", ["ecs-tasks.amazonaws.com"])
                )
            ]
        )
    ))
    # Fargate-only task-definition kwargs.
    # NOTE(review): the boto3 call resolves the execution role ARN from the
    # live AWS account at template-build time — requires credentials and the
    # pre-existing 'ecsTaskExecutionRole'.
    launch_type_td = {}
    if launch_type == self.LAUNCH_TYPE_FARGATE:
        launch_type_td = {
            'RequiresCompatibilities': ['FARGATE'],
            'ExecutionRoleArn': boto3.resource('iam').Role('ecsTaskExecutionRole').arn,
            'NetworkMode': 'awsvpc',
            'Cpu': str(config['fargate']['cpu']),
            'Memory': str(config['fargate']['memory'])
        }
    td = TaskDefinition(
        service_name + "TaskDefinition",
        Family=service_name + "Family",
        ContainerDefinitions=[cd],
        TaskRoleArn=Ref(task_role),
        **launch_type_td
    )
    self.template.add_resource(td)
    desired_count = self._get_desired_task_count_for_service(service_name)
    deployment_configuration = DeploymentConfiguration(
        MinimumHealthyPercent=100,
        MaximumPercent=200
    )
    if 'http_interface' in config:
        alb, lb, service_listener, alb_sg = self._add_alb(cd, service_name, config, launch_type)
        if launch_type == self.LAUNCH_TYPE_FARGATE:
            # if launch type is ec2, then services inherit the ec2 instance
            # security group otherwise, we need to specify a security group
            # for the service
            service_security_group = SecurityGroup(
                pascalcase("FargateService" + self.env + service_name),
                GroupName=pascalcase("FargateService" + self.env + service_name),
                SecurityGroupIngress=[{
                    'IpProtocol': 'TCP',
                    'SourceSecurityGroupId': Ref(alb_sg),
                    'ToPort': int(config['http_interface']['container_port']),
                    'FromPort': int(config['http_interface']['container_port']),
                }],
                VpcId=Ref(self.vpc),
                GroupDescription=pascalcase("FargateService" + self.env + service_name)
            )
            self.template.add_resource(service_security_group)
            launch_type_svc = {
                'NetworkConfiguration': NetworkConfiguration(
                    AwsvpcConfiguration=AwsvpcConfiguration(
                        Subnets=[
                            Ref(self.private_subnet1),
                            Ref(self.private_subnet2)
                        ],
                        SecurityGroups=[
                            Ref(service_security_group)
                        ]
                    )
                )
            }
        else:
            launch_type_svc = {
                'Role': Ref(self.ecs_service_role),
                'PlacementStrategies': self.PLACEMENT_STRATEGIES
            }
        svc = Service(
            service_name,
            LoadBalancers=[lb],
            Cluster=self.cluster_name,
            TaskDefinition=Ref(td),
            DesiredCount=desired_count,
            DependsOn=service_listener.title,
            LaunchType=launch_type,
            **launch_type_svc,
        )
        self.template.add_output(
            Output(
                service_name + 'EcsServiceName',
                Description='The ECS name which needs to be entered',
                Value=GetAtt(svc, 'Name')
            )
        )
        self.template.add_output(
            Output(
                service_name + "URL",
                Description="The URL at which the service is accessible",
                Value=Sub("https://${" + alb.name + ".DNSName}")
            )
        )
        self.template.add_resource(svc)
    else:
        launch_type_svc = {}
        if launch_type == self.LAUNCH_TYPE_FARGATE:
            # if launch type is ec2, then services inherit the ec2 instance
            # security group otherwise, we need to specify a security group
            # for the service
            service_security_group = SecurityGroup(
                pascalcase("FargateService" + self.env + service_name),
                GroupName=pascalcase("FargateService" + self.env + service_name),
                SecurityGroupIngress=[],
                VpcId=Ref(self.vpc),
                GroupDescription=pascalcase("FargateService" + self.env +
                                            service_name)
            )
            self.template.add_resource(service_security_group)
            launch_type_svc = {
                'NetworkConfiguration': NetworkConfiguration(
                    AwsvpcConfiguration=AwsvpcConfiguration(
                        Subnets=[
                            Ref(self.private_subnet1),
                            Ref(self.private_subnet2)
                        ],
                        SecurityGroups=[
                            Ref(service_security_group)
                        ]
                    )
                )
            }
        else:
            launch_type_svc = {
                'PlacementStrategies': self.PLACEMENT_STRATEGIES
            }
        svc = Service(
            service_name,
            Cluster=self.cluster_name,
            TaskDefinition=Ref(td),
            DesiredCount=desired_count,
            DeploymentConfiguration=deployment_configuration,
            LaunchType=launch_type,
            **launch_type_svc
        )
        self.template.add_output(
            Output(
                service_name + 'EcsServiceName',
                Description='The ECS name which needs to be entered',
                Value=GetAtt(svc, 'Name')
            )
        )
        self.template.add_resource(svc)
    self._add_service_alarms(svc)
def add_envoy_container_definition(self):
    """
    Method to expand the containers configuration and add the Envoy SideCar.

    Appends an Envoy proxy container to the task, attaches an APPMESH
    ProxyConfiguration to the task definition, and recomputes the task's
    compute parameters to account for the sidecar.
    """
    envoy_container_name = "envoy"
    task = self.stack.service.task
    # Envoy's proxy ingress (15000) and egress (15001) ports — must match
    # the ProxyIngressPort/ProxyEgressPort values below.
    envoy_port_mapping = [
        PortMapping(ContainerPort=15000, HostPort=15000),
        PortMapping(ContainerPort=15001, HostPort=15001),
    ]
    envoy_environment = [
        Environment(
            Name="APPMESH_VIRTUAL_NODE_NAME",
            Value=Sub(
                f"mesh/${{{appmesh_params.MESH_NAME.title}}}/virtualNode/${{{self.node.title}.VirtualNodeName}}"
            ),
        ),
        # X-Ray tracing in Envoy follows the task family's X-Ray setting.
        Environment(
            Name="ENABLE_ENVOY_XRAY_TRACING",
            Value="1" if task.family_config.use_xray else "0",
        ),
        Environment(Name="ENABLE_ENVOY_STATS_TAGS", Value="1"),
    ]
    envoy_log_config = LogConfiguration(
        LogDriver="awslogs",
        Options={
            "awslogs-group": Ref(ecs_params.LOG_GROUP_T),
            "awslogs-region": Ref("AWS::Region"),
            "awslogs-stream-prefix": envoy_container_name,
        },
    )
    self.stack.stack_template.add_parameter(appmesh_params.ENVOY_IMAGE_URL)
    envoy_container = ContainerDefinition(
        Image=Ref(appmesh_params.ENVOY_IMAGE_URL),
        Name=envoy_container_name,
        Cpu="128",
        Memory="256",
        # NOTE(review): "******" looks like a redacted/scrubbed value —
        # presumably the UID matching IgnoredUID (1337); verify.
        User="******",
        Essential=True,
        LogConfiguration=envoy_log_config,
        Environment=envoy_environment,
        PortMappings=envoy_port_mapping,
        Ulimits=[Ulimit(HardLimit=15000, SoftLimit=15000, Name="nofile")],
        # Probe Envoy's admin endpoint until the proxy reports LIVE.
        HealthCheck=HealthCheck(
            Command=[
                "CMD-SHELL",
                "curl -s http://localhost:9901/server_info | grep state | grep -q LIVE",
            ],
            Interval=5,
            Timeout=2,
            Retries=3,
            StartPeriod=10,
        ),
    )
    proxy_config = ProxyConfiguration(
        ContainerName="envoy",
        Type="APPMESH",
        ProxyConfigurationProperties=[
            Environment(Name="IgnoredUID", Value="1337"),
            Environment(Name="ProxyIngressPort", Value="15000",),
            Environment(Name="ProxyEgressPort", Value="15001"),
            Environment(Name="IgnoredGID", Value=""),
            Environment(
                Name="EgressIgnoredIPs", Value="169.254.170.2,169.254.169.254"
            ),
            Environment(Name="EgressIgnoredPorts", Value=""),
            Environment(
                Name="AppPorts",
                # NOTE(review): reads ``.Port`` — assumes self.port_mappings
                # holds objects exposing a ``Port`` attribute (not ECS
                # PortMapping, which has ContainerPort); TODO confirm.
                Value=",".join([f"{port.Port}" for port in self.port_mappings]),
            ),
        ],
    )
    task.containers.append(envoy_container)
    setattr(task.definition, "ProxyConfiguration", proxy_config)
    # Recompute task CPU/memory now that the sidecar is added.
    task.set_task_compute_parameter()
if linux_parameters: definition.LinuxParameters = linux_parameters for volume in volumes: volume_name = '{}{}'.format( name, ''.join([i for i in volume[0].capitalize() if i.isalpha()])) task.Volumes.append( Volume(Name=volume_name, Host=Host(SourcePath=('/mnt/{}'.format(volume[0]))))) definition.MountPoints.append( MountPoint(ContainerPath=volume[1], SourceVolume=volume_name)) for port in ports: definition.PortMappings.append( PortMapping( ContainerPort=port, HostPort=port, Protocol='tcp', )) for depend in depends: definition.DependsOn.append( ContainerDependency( Condition='START', ContainerName=depend, )) definition.Links.append(depend) task.ContainerDefinitions.append(definition) t.add_resource(task) if schedule: target = Target( Id="{}-Schedule".format(name),
def build_cloudformation_resource(
    self,
    container_configurations,
    ecr_image_uri,
    fallback_task_role,
    fallback_task_execution_role,
):
    """
    Build the troposphere TaskDefinition for this service from its
    configuration: main container (environment, secrets, logging, ports,
    health check, limits), optional sidecars, and launch-type specific
    task-level settings.

    :param container_configurations: per-container dicts of ``environment``
        and ``secrets``
    :param ecr_image_uri: image URI for the main container
    :param fallback_task_role: role used when ``task_role_arn`` is absent
    :param fallback_task_execution_role: role used when
        ``task_execution_role_arn`` is absent
    :return: troposphere.ecs.TaskDefinition
    :raises UnrecoverableException: for an invalid ``command`` type
    :raises NotImplementedError: for udp/tcp interfaces on Fargate
    """
    environment = self.environment
    service_name = self.service_name
    config = self.configuration
    launch_type = get_launch_type(config)
    # CloudFormation limits the family name to 255 characters.
    task_family_name = f'{environment}{service_name}Family'[:255]
    td_kwargs = dict()
    td_kwargs['PlacementConstraints'] = [
        PlacementConstraint(Type=constraint['type'],
                            Expression=constraint['expression'])
        for constraint in config.get('placement_constraints', [])
    ]
    td_kwargs['TaskRoleArn'] = config.get('task_role_arn') if 'task_role_arn' in config \
        else fallback_task_role
    td_kwargs['ExecutionRoleArn'] = config.get('task_execution_role_arn') \
        if 'task_execution_role_arn' in config \
        else fallback_task_execution_role
    # udp/tcp interfaces require awsvpc networking (NLB target by IP).
    if ('udp_interface' in config) or ('tcp_interface' in config):
        td_kwargs['NetworkMode'] = 'awsvpc'
    log_config = self._gen_log_config(config)
    env_config = container_configurations[container_name(
        service_name)].get('environment', {})
    secrets_config = container_configurations[container_name(
        service_name)].get('secrets', {})
    cd_kwargs = {
        "Environment": [
            Environment(Name=name, Value=env_config[name])
            for name in env_config
        ],
        "Secrets": [
            Secret(Name=name, ValueFrom=secrets_config[name])
            for name in secrets_config
        ],
        "Name": container_name(service_name),
        "Image": ecr_image_uri,
        "Essential": True,
        "LogConfiguration": log_config,
        "MemoryReservation": int(config['memory_reservation']),
        "Cpu": 0,
        'Memory': int(config.get('memory_hard_limit', HARD_LIMIT_MEMORY_IN_MB)),
    }
    if config.get('command'):
        # command can be one of list(string) / string
        command = config.get('command')
        if isinstance(command, str):
            cd_kwargs['Command'] = [command]
        elif isinstance(command, list):
            cd_kwargs['Command'] = command
        else:
            raise UnrecoverableException(
                'command should either be string or list(string)')
    if 'stop_timeout' in config:
        cd_kwargs['StopTimeout'] = int(config['stop_timeout'])
    if 'system_controls' in config:
        cd_kwargs['SystemControls'] = [
            SystemControl(Namespace=system_control['namespace'],
                          Value=system_control['value'])
            for system_control in config['system_controls']
        ]
    if 'dns_servers' in config:
        cd_kwargs['DnsServers'] = config.get('dns_servers', [])
    # Fargate only supports the http interface here.
    if launch_type == LAUNCH_TYPE_FARGATE:
        if 'udp_interface' in config:
            raise NotImplementedError(
                'udp interface not yet implemented in fargate type, please use ec2 type'
            )
        elif 'tcp_interface' in config:
            raise NotImplementedError(
                'tcp interface not yet implemented in fargate type, please use ec2 type'
            )
    if 'http_interface' in config:
        cd_kwargs['PortMappings'] = [
            PortMapping(ContainerPort=int(config['http_interface']
                                          ['container_port']))
        ]
    elif 'udp_interface' in config:
        # udp interface also opens a tcp health-check port.
        cd_kwargs['PortMappings'] = [
            PortMapping(ContainerPort=int(
                config['udp_interface']['container_port']),
                HostPort=int(
                    config['udp_interface']['container_port']),
                Protocol='udp'),
            PortMapping(ContainerPort=int(
                config['udp_interface']['health_check_port']),
                HostPort=int(
                    config['udp_interface']['health_check_port']),
                Protocol='tcp')
        ]
    elif 'tcp_interface' in config:
        cd_kwargs['PortMappings'] = [
            PortMapping(ContainerPort=int(
                config['tcp_interface']['container_port']),
                Protocol='tcp')
        ]
    if 'container_health_check' in config:
        configured_health_check = config['container_health_check']
        ecs_health_check = {
            'Command': ['CMD-SHELL', configured_health_check['command']]
        }
        if 'start_period' in configured_health_check:
            ecs_health_check['StartPeriod'] = int(
                configured_health_check['start_period'])
        if 'retries' in configured_health_check:
            ecs_health_check['Retries'] = int(
                configured_health_check['retries'])
        if 'interval' in configured_health_check:
            ecs_health_check['Interval'] = int(
                configured_health_check['interval'])
        if 'timeout' in configured_health_check:
            ecs_health_check['Timeout'] = int(
                configured_health_check['timeout'])
        cd_kwargs['HealthCheck'] = HealthCheck(**ecs_health_check)
    # Legacy Docker links from the main container to each sidecar.
    if 'sidecars' in config:
        links = []
        for sidecar in config['sidecars']:
            sidecar_name = sidecar.get('name')
            links.append("{}:{}".format(container_name(sidecar_name),
                                        sidecar_name))
        cd_kwargs['Links'] = links
    if 'container_labels' in config:
        cd_kwargs['DockerLabels'] = config.get('container_labels')
    if 'ulimits' in config:
        cd_kwargs['Ulimits'] = [
            Ulimit(
                Name=ulimit['name'],
                SoftLimit=ulimit['soft_limit'],
                HardLimit=ulimit['hard_limit'],
            ) for ulimit in config.get('ulimits', [])
        ]
    cd = ContainerDefinition(**cd_kwargs)
    container_definitions = [cd]
    if 'sidecars' in config:
        for sidecar in config['sidecars']:
            container_definitions.append(
                self._gen_container_definitions_for_sidecar(
                    sidecar, log_config,
                    container_configurations.get(
                        container_name(sidecar.get('name')), {})),
            )
    # Task-level Fargate sizing overrides.
    if launch_type == LAUNCH_TYPE_FARGATE:
        td_kwargs['RequiresCompatibilities'] = [LAUNCH_TYPE_FARGATE]
        td_kwargs['NetworkMode'] = 'awsvpc'
        td_kwargs['Cpu'] = str(config['fargate']['cpu'])
        td_kwargs['Memory'] = str(config['fargate']['memory'])
    return TaskDefinition(self._resource_name(service_name),
                          Family=task_family_name,
                          ContainerDefinitions=container_definitions,
                          **td_kwargs)
def main():
    """Build the ECS EC2 service stack (NLB + task definition + service + DNS record)
    and write it to ``ecs-ec2-service-cf.yaml``.
    """
    template = Template()
    # set_version: consistent with set_description below (add_version is the deprecated alias)
    template.set_version("2010-09-09")
    template.set_description("AWS CloudFormation ECS Service")

    # --- Parameters -------------------------------------------------------
    Application = template.add_parameter(Parameter("Application", Type="String"))
    DockerImage = template.add_parameter(Parameter("DockerImage", Type="String"))
    USERNAME = template.add_parameter(Parameter("USERNAME", Type="String"))
    ClusterName = template.add_parameter(Parameter("ClusterName", Type="String"))
    ContainerPort = template.add_parameter(Parameter("ContainerPort", Type="String"))
    HostPort = template.add_parameter(Parameter("HostPort", Type="String"))
    HostedZoneName = template.add_parameter(Parameter("HostedZoneName", Type="String"))
    CertArn = template.add_parameter(Parameter("CertArn", Type="String"))
    ExecutionRoleArn = template.add_parameter(
        Parameter(
            "ExecutionRoleArn",
            Type="String",
            # typo fixed: "creadentials" -> "credentials"
            Description="Execution Role to get credentials from ssm",
        ))
    # NOTE(review): HealthCheckPath and HealthCheckTimeoutSeconds are declared but
    # never referenced below (the NLB target group uses TCP checks); kept so
    # existing stacks that pass them keep working.
    HealthCheckPath = template.add_parameter(Parameter("HealthCheckPath", Type="String"))
    HealthCheckIntervalSeconds = template.add_parameter(
        Parameter("HealthCheckIntervalSeconds", Type="String"))
    HealthyThresholdCount = template.add_parameter(
        Parameter("HealthyThresholdCount", Type="String"))
    HealthCheckTimeoutSeconds = template.add_parameter(
        Parameter("HealthCheckTimeoutSeconds", Type="String"))
    UnhealthyThresholdCount = template.add_parameter(
        Parameter("UnhealthyThresholdCount", Type="String"))
    VpcId = template.add_parameter(Parameter("VpcId", Type="String"))
    Subnets = template.add_parameter(
        Parameter("Subnets", Type="List<AWS::EC2::Subnet::Id>"))

    # --- Network load balancer -------------------------------------------
    NetworkLB = template.add_resource(
        elb.LoadBalancer(
            "NetworkLB",
            Name=Join("", [Ref(Application), "-nlb"]),
            Scheme="internet-facing",
            Subnets=Ref(Subnets),
            Type='network'))

    # TCP target group the NLB forwards to; health checks are plain TCP.
    NlbTargetGroup = template.add_resource(
        elb.TargetGroup(
            "NlbTargetGroup",
            Name='ecs-service-targetgroup',
            HealthCheckIntervalSeconds=Ref(HealthCheckIntervalSeconds),
            HealthCheckProtocol="TCP",
            HealthyThresholdCount=Ref(HealthyThresholdCount),
            Port=80,
            Protocol="TCP",
            UnhealthyThresholdCount=Ref(UnhealthyThresholdCount),
            VpcId=Ref(VpcId)))

    # TLS listener (443) terminating with CertArn, forwarding to the TCP group.
    NlbListener = template.add_resource(
        elb.Listener(
            "Listener",
            DependsOn=["NlbTargetGroup", "NetworkLB"],
            Certificates=[elb.Certificate(CertificateArn=Ref(CertArn))],
            Port="443",
            Protocol="TLS",
            LoadBalancerArn=Ref(NetworkLB),
            DefaultActions=[
                elb.Action(Type="forward", TargetGroupArn=Ref(NlbTargetGroup))
            ]))

    # --- Task definition --------------------------------------------------
    Task_Definition = template.add_resource(
        TaskDefinition(
            'TaskDefinition',
            Memory='500',
            ExecutionRoleArn=Ref(ExecutionRoleArn),
            ContainerDefinitions=[
                ContainerDefinition(
                    Name=Join("", [Ref(Application)]),
                    Image=Ref(DockerImage),
                    Essential=True,
                    # USERNAME is injected from SSM via the execution role
                    Secrets=[Secret(Name='USERNAME', ValueFrom=Ref(USERNAME))],
                    Environment=[
                        Environment(Name="DOCKER_LABELS", Value="true")
                    ],
                    DockerLabels={
                        'aws-account': Ref("AWS::AccountId"),
                        'region': Ref("AWS::Region"),
                        'stack': Ref("AWS::StackName")
                    },
                    PortMappings=[
                        PortMapping(ContainerPort=Ref(ContainerPort),
                                    HostPort=Ref(HostPort))
                    ])
            ]))

    # --- ECS service wired to the NLB target group -----------------------
    app_service = template.add_resource(
        Service(
            "AppService",
            DependsOn=["Listener", "TaskDefinition"],
            Cluster=Ref(ClusterName),
            DesiredCount=1,
            TaskDefinition=Ref(Task_Definition),
            ServiceName=Join("", [Ref(Application), "-ecs-service"]),
            LoadBalancers=[
                ecs.LoadBalancer(
                    ContainerName=Join("", [Ref(Application)]),
                    ContainerPort=Ref(ContainerPort),
                    TargetGroupArn=Ref(NlbTargetGroup))
            ]))

    # --- Route53 CNAME pointing at the NLB -------------------------------
    AppDNSRecord = template.add_resource(
        RecordSetType(
            "AppDNSRecord",
            DependsOn=["AppService"],
            HostedZoneName=Join("", [Ref(HostedZoneName), "."]),
            Name=Join("", [Ref(Application), ".", Ref(HostedZoneName), "."]),
            Type="CNAME",
            TTL="900",
            ResourceRecords=[GetAtt(NetworkLB, "DNSName")]))

    template.add_output(
        Output("URL",
               Description="DomainName",
               Value=Join("", ["https://", Ref(AppDNSRecord)])))

    with open("ecs-ec2-service-cf.yaml", "w") as yamlout:
        yamlout.write(template.to_yaml())
def __init__(self, template, title, definition, config):
    """Create the container definition for a service and register its stack outputs.

    :param troposphere.Template template: template to add the container definition to
    :param str title: name of the resource / service
    :param dict definition: service definition
    :param ServiceConfig config: service configuration
    :raises AttributeError: if any of ``self.required_keys`` is missing from *definition*
    """
    if not set(self.required_keys).issubset(set(definition)):
        raise AttributeError(
            "Required attributes for a ecs_service are", self.required_keys
        )
    # Image URL is passed in as a stack parameter so it can vary per deployment.
    image_param = Parameter(
        f"{title}ImageUrl",
        Type="String",
        Description=f"ImageURL for {title}",
    )
    add_parameters(template, [image_param])
    self.stack_parameters = {image_param.title: definition["image"]}
    # Pick the CPU setting: explicit allocation wins, else fall back to the
    # reservation, else let CloudFormation use its default (NoValue).
    if isinstance(config.cpu_alloc, int):
        cpu_config = config.cpu_alloc
    elif isinstance(config.cpu_alloc, Ref) and isinstance(config.cpu_resa, int):
        cpu_config = config.cpu_resa
    else:
        cpu_config = Ref(AWS_NO_VALUE)
    self.definition = ContainerDefinition(
        f"{title}Container",
        Image=Ref(image_param),
        Name=title,
        Cpu=cpu_config,
        Memory=config.mem_alloc,
        MemoryReservation=config.mem_resa,
        PortMappings=[
            PortMapping(ContainerPort=port, HostPort=port)
            for port in config.ingress_mappings.keys()
        ] if keyisset("ports", definition) else Ref(AWS_NO_VALUE),
        Environment=import_env_variables(definition["environment"])
        if keyisset("environment", definition)
        else Ref(AWS_NO_VALUE),
        LogConfiguration=LogConfiguration(
            LogDriver="awslogs",
            Options={
                "awslogs-group": Ref(ecs_params.LOG_GROUP_T),
                "awslogs-region": Ref("AWS::Region"),
                "awslogs-stream-prefix": title,
            },
        ),
        # command is stored as a ";"-separated string in the definition
        Command=definition["command"].strip().split(";")
        if keyisset("command", definition)
        else Ref(AWS_NO_VALUE),
        DependsOn=[ContainerDependency(**args) for args in config.family_dependents]
        if config.family_dependents
        else Ref(AWS_NO_VALUE),
        Essential=config.essential,
        HealthCheck=config.healthcheck
        if isinstance(config.healthcheck, HealthCheck)
        else Ref(AWS_NO_VALUE),
    )
    # Expose the effective sizing values as stack outputs.
    values = []
    if isinstance(config.cpu_resa, int):
        values.append(("Cpu", "Cpu", str(config.cpu_resa)))
    # BUG FIX: the Memory output was gated on config.cpu_resa (copy/paste of the
    # check above) while emitting config.mem_alloc — gate it on mem_alloc itself.
    if isinstance(config.mem_alloc, int):
        values.append(("Memory", "Memory", str(config.mem_alloc)))
    if isinstance(config.mem_resa, int):
        values.append(
            ("MemoryReservation", "MemoryReservation", str(config.mem_resa))
        )
    template.add_output(ComposeXOutput(title, values, export=False).outputs)
template.add_resource(listener_rule) test_task_definition = TaskDefinition( "taskdefinition", ContainerDefinitions=[ ContainerDefinition( DockerLabels={}, Environment=[], Image="nginxdemos/hello", Name="helloworld", MemoryReservation=64, PortMappings=[ PortMapping( "httpportmapping", ContainerPort=80, Protocol="tcp" ) ] ) ] ) template.add_resource(test_task_definition) service = Service( "service", Cluster="live", DesiredCount=1, LaunchType="EC2", LoadBalancers=[
def generate_template(d):
    """Build the CloudFormation template for a Fargate service with its ECR
    repository, ALB path routing, and an S3 -> CodeBuild -> ECS pipeline.

    :param dict d: deployment settings (env, project_name, service_name,
        container sizing/ports, stack export names, artifact store, tags, ...)
    :return: the assembled troposphere Template
    """
    # Set template metadata
    t = Template()
    t.add_version("2010-09-09")
    t.set_description(d["cf_template_description"])
    aws_account_id = Ref("AWS::AccountId")
    aws_region = Ref("AWS::Region")

    # Task definition
    task_definition = t.add_resource(
        TaskDefinition(
            "TaskDefinition",
            Family=Join(
                "",
                [d["env"], "-", d["project_name"], "-", d["service_name"]]),
            RequiresCompatibilities=["FARGATE"],
            Cpu=d["container_cpu"],
            Memory=d["container_memory"],
            NetworkMode="awsvpc",
            ExecutionRoleArn=ImportValue(d["ecs_stack_name"] +
                                         "-ECSClusterRole"),
            ContainerDefinitions=[
                ContainerDefinition(
                    Name=Join("", [
                        d["env"], "-", d["project_name"], "-",
                        d["service_name"]
                    ]),
                    # BUG FIX: the "-" separators were missing here, so the
                    # image URI ("<env><project><service>") did not match the
                    # ECR RepositoryName or the CodeBuild IMAGE_URI, which
                    # both use "<env>-<project>-<service>".
                    Image=Join(
                        "",
                        [
                            aws_account_id, ".dkr.ecr.", aws_region,
                            ".amazonaws.com/", d["env"], "-",
                            d["project_name"], "-", d["service_name"],
                            ":latest"
                        ],
                    ),
                    Essential=True,
                    PortMappings=[
                        PortMapping(
                            ContainerPort=d["container_port"],
                            HostPort=d["container_port"],
                        )
                    ],
                    EntryPoint=["sh", "-c"],
                    Command=[d["container_command"]],
                    LogConfiguration=LogConfiguration(
                        LogDriver="awslogs",
                        Options={
                            "awslogs-region": aws_region,
                            "awslogs-group": Join("", [
                                d["env"], "-", d["project_name"], "-",
                                d["service_name"]
                            ]),
                            "awslogs-stream-prefix": "ecs",
                            "awslogs-create-group": "true"
                        }))
            ],
            Tags=Tags(d["tags"], {"Name": d["project_name"] + "-task-definition"}),
        ))

    # ECR repository the pipeline pushes images into
    ecr = t.add_resource(
        Repository(
            "ECR",
            DependsOn="ListenerRule",
            RepositoryName=Join(
                "",
                [d["env"], "-", d["project_name"], "-", d["service_name"]]),
            Tags=Tags(d["tags"], {"Name": d["project_name"] + "-ecr"}),
        ))

    # Target group (IP targets, HTTP health checks on the container port)
    target_group = t.add_resource(
        elb.TargetGroup(
            "TargetGroup",
            Name=Join("", [d["env"], "-", d["service_name"]]),
            HealthCheckIntervalSeconds="30",
            HealthCheckProtocol="HTTP",
            HealthCheckPort=d["container_port"],
            HealthCheckTimeoutSeconds="10",
            HealthyThresholdCount="4",
            HealthCheckPath=d["tg_health_check_path"],
            Matcher=elb.Matcher(HttpCode="200-299"),
            Port=d["container_port"],
            Protocol="HTTP",
            TargetType="ip",
            UnhealthyThresholdCount="3",
            VpcId=ImportValue(d["network_stack_name"] + "-VPCId"),
            Tags=Tags(d["tags"], {"Name": d["project_name"] + "-ecr"}),
        ))

    # Listener rule: route the API path pattern to the target group
    t.add_resource(
        elb.ListenerRule(
            "ListenerRule",
            DependsOn="TargetGroup",
            ListenerArn=ImportValue(d["ecs_stack_name"] + "-ListenerArnHTTP"),
            Conditions=[
                elb.Condition(Field="path-pattern",
                              Values=[d["application_path_api"]])
            ],
            Actions=[
                elb.Action(Type="forward", TargetGroupArn=Ref(target_group))
            ],
            Priority="1",
        ))

    # ECS service (waits for the pipeline so the first image exists)
    ecs_service = t.add_resource(
        Service(
            "ECSService",
            ServiceName=Join(
                "",
                [d["env"], "-", d["project_name"], "-", d["service_name"]]),
            DependsOn="pipeline",
            DesiredCount=d["container_desired_tasks_count"],
            TaskDefinition=Ref(task_definition),
            LaunchType="FARGATE",
            NetworkConfiguration=NetworkConfiguration(
                AwsvpcConfiguration=AwsvpcConfiguration(
                    Subnets=[
                        ImportValue(d["network_stack_name"] +
                                    "-PrivateSubnetId1"),
                        ImportValue(d["network_stack_name"] +
                                    "-PrivateSubnetId2"),
                    ],
                    SecurityGroups=[
                        ImportValue(d["ecs_stack_name"] + "-ECSClusterSG")
                    ],
                )),
            LoadBalancers=([
                LoadBalancer(
                    ContainerName=Join(
                        "",
                        [
                            d["env"], "-", d["project_name"], "-",
                            d["service_name"]
                        ],
                    ),
                    ContainerPort=d["container_port"],
                    TargetGroupArn=Ref(target_group),
                )
            ]),
            Cluster=ImportValue(d["ecs_stack_name"] + "-ECSClusterName"),
            Tags=Tags(d["tags"], {"Name": d["project_name"] + "-ecs-service"}),
        ))

    # App Autoscaling target
    # App Autoscaling policy

    # Codebuild project
    codebuild = t.add_resource(
        Project(
            "codebuild",
            Name=Join(
                "",
                [d["env"], "-", d["project_name"], "-", d["service_name"]]),
            DependsOn="ECR",
            ServiceRole=ImportValue(d["ecs_stack_name"] +
                                    "-CodebuildDeveloperRole"),
            Artifacts=Artifacts(
                Name="Build",
                Location=d["artifact_store"],
                Type="S3",
            ),
            Description="Build a docker image and send it to ecr",
            Source=Source(
                BuildSpec="buildspec.yml",
                Type="S3",
                Location=d["artifact_store"] + "/" + d["artifact_name"],
            ),
            Environment=Environment(
                ComputeType="BUILD_GENERAL1_SMALL",
                Image="aws/codebuild/standard:4.0",
                PrivilegedMode=True,  # needed for docker-in-docker builds
                Type="LINUX_CONTAINER",
                EnvironmentVariables=[
                    EnvironmentVariable(
                        Name="AWS_DEFAULT_REGION",
                        Type="PLAINTEXT",
                        Value=aws_region,
                    ),
                    EnvironmentVariable(
                        Name="SERVICE_NAME",
                        Type="PLAINTEXT",
                        Value=Join(
                            "",
                            [
                                d["env"], "-", d["project_name"], "-",
                                d["service_name"]
                            ],
                        ),
                    ),
                    EnvironmentVariable(
                        Name="IMAGE_URI",
                        Type="PLAINTEXT",
                        Value=Join(
                            "",
                            [
                                aws_account_id,
                                ".dkr.ecr.",
                                aws_region,
                                ".amazonaws.com/",
                                d["env"],
                                "-",
                                d["project_name"],
                                "-",
                                d["service_name"],
                            ],
                        ),
                    ),
                ],
            ),
            Tags=Tags(d["tags"], {"Name": d["project_name"] + "-codebuild"}),
        ))

    # Codepipeline: S3 source -> CodeBuild -> ECS deploy
    pipeline = t.add_resource(
        Pipeline(
            "pipeline",
            Name=Join(
                "",
                [d["env"], "-", d["project_name"], "-", d["service_name"]]),
            RoleArn=ImportValue(d["ecs_stack_name"] + "-CodePipelineRole"),
            Stages=[
                Stages(
                    Name="Source",
                    Actions=[
                        Actions(
                            Name="Source",
                            ActionTypeId=ActionTypeId(
                                Category="Source",
                                Owner="AWS",
                                Version="1",
                                Provider="S3",
                            ),
                            OutputArtifacts=[
                                OutputArtifacts(Name="source_artifact")
                            ],
                            Configuration={
                                "S3Bucket": d["artifact_store"],
                                "S3ObjectKey": d["artifact_name"],
                            },
                            RunOrder="1",
                        )
                    ],
                ),
                Stages(
                    Name="Build",
                    Actions=[
                        Actions(
                            Name="Build",
                            InputArtifacts=[
                                InputArtifacts(Name="source_artifact")
                            ],
                            OutputArtifacts=[
                                OutputArtifacts(Name="build_artifact")
                            ],
                            ActionTypeId=ActionTypeId(
                                Category="Build",
                                Owner="AWS",
                                Version="1",
                                Provider="CodeBuild",
                            ),
                            Configuration={"ProjectName": Ref(codebuild)},
                            RunOrder="1",
                        )
                    ],
                ),
                Stages(
                    Name="Deploy",
                    Actions=[
                        Actions(
                            Name="Deploy",
                            InputArtifacts=[
                                InputArtifacts(Name="build_artifact")
                            ],
                            ActionTypeId=ActionTypeId(
                                Category="Deploy",
                                Owner="AWS",
                                Version="1",
                                Provider="ECS",
                            ),
                            Configuration={
                                "ClusterName":
                                ImportValue(d["ecs_stack_name"] +
                                            "-ECSClusterName"),
                                "ServiceName":
                                Join(
                                    "",
                                    [
                                        d["env"],
                                        "-",
                                        d["project_name"],
                                        "-",
                                        d["service_name"],
                                    ],
                                ),
                                "FileName": "definitions.json",
                            },
                        )
                    ],
                ),
            ],
            ArtifactStore=ArtifactStore(Type="S3",
                                        Location=d["artifact_store"]),
        ))

    # Route53
    # Outputs
    return t
Name="WebWorker", # 1024 is full CPU Cpu=web_worker_cpu, Memory=web_worker_memory, Essential=True, Image=Join("", [ Ref(AWS_ACCOUNT_ID), ".dkr.ecr.", Ref(AWS_REGION), ".amazonaws.com/", repository, ":", application_revision, ]), PortMappings=[PortMapping( HostPort=0, ContainerPort=web_worker_port, )], LogConfiguration=LogConfiguration( LogDriver="awslogs", Options={ 'awslogs-group': Ref(web_log_group), 'awslogs-region': Ref(AWS_REGION), } ), Environment=[ Environment( Name="AWS_STORAGE_BUCKET_NAME", Value=assets_bucket, ), Environment( Name="CDN_DOMAIN_NAME",
def __create_ecs():
    """Assemble the sample Fargate nginx stack and dump it to ecs.yml."""
    tpl = Template()

    # Tunable sizing parameters with sensible defaults.
    param_count = tpl.add_parameter(
        Parameter(title='DesiredCount', Default=1, Type='Number'))
    param_cpu = tpl.add_parameter(
        Parameter(title='Cpu', Default=256, Type='Number'))
    param_memory = tpl.add_parameter(
        Parameter(title='Memory', Default=512, Type='Number'))

    ecs_cluster = tpl.add_resource(Cluster(title='SampleCluster'))
    group = tpl.add_resource(
        LogGroup(title='SampleLogGroup', LogGroupName='/aws/ecs/sample'))

    app_name = 'sample-nginx'

    # Single-container nginx task, logs shipped via awslogs.
    nginx_container = ContainerDefinition(
        Image='nginx:latest',
        Name=app_name,
        PortMappings=[
            PortMapping(ContainerPort=80, HostPort=80, Protocol='tcp')
        ],
        LogConfiguration=LogConfiguration(
            LogDriver='awslogs',
            Options={
                'awslogs-region': Ref('AWS::Region'),
                'awslogs-group': Ref(group),
                'awslogs-stream-prefix': 'nginx'
            }))
    task_def = tpl.add_resource(
        TaskDefinition(
            title='SampleTaskDefinition',
            Cpu=Ref(param_cpu),
            Family='sample-fargate-task',
            RequiresCompatibilities=['FARGATE'],
            Memory=Ref(param_memory),
            NetworkMode='awsvpc',
            ExecutionRoleArn=Sub(
                'arn:aws:iam::${AWS::AccountId}:role/ecsTaskExecutionRole'),
            ContainerDefinitions=[nginx_container]))

    # Public-subnet Fargate service attached to the shared target group.
    tpl.add_resource(
        Service(
            title='SampleService',
            ServiceName='sample-fargate',
            Cluster=Ref(ecs_cluster),
            DesiredCount=Ref(param_count),
            TaskDefinition=Ref(task_def),
            LaunchType='FARGATE',
            NetworkConfiguration=NetworkConfiguration(
                AwsvpcConfiguration=AwsvpcConfiguration(
                    AssignPublicIp='ENABLED',
                    SecurityGroups=[
                        ImportValue(ExportName.TASK_SECURITY_GROUP.value)
                    ],
                    Subnets=[
                        ImportValue(
                            CommonResource.ExportName.PUBLIC_SUBNET_A_ID.value),
                        ImportValue(
                            CommonResource.ExportName.PUBLIC_SUBNET_B_ID.value),
                    ])),
            LoadBalancers=[
                EcsLoadBalancer(
                    ContainerName=app_name,
                    ContainerPort=80,
                    TargetGroupArn=ImportValue(ExportName.TARGET_GROUP.value))
            ]))

    output_template_file(tpl, 'ecs.yml')
def add_ecs_task(self):
    """Add the Anchore ECS task definition (engine + linked postgres containers)
    to the template and return the template.

    :return: ``self.cfn_template`` with the TaskDefinition resource added
    """
    # Idiom fix: plain literals instead of int('512') / bool('true').
    # bool('true') was especially misleading — ANY non-empty string is truthy,
    # so bool('false') would also have been True.
    self.cfn_template.add_resource(
        TaskDefinition(
            title=constants.TASK,
            # shared volume for the postgres data directory
            Volumes=[Volume(Name='anchore_db_vol')],
            TaskRoleArn=GetAtt(constants.TASK_ROLE, 'Arn'),
            ContainerDefinitions=[
                ContainerDefinition(
                    Name='anchore-engine',
                    Hostname='anchore-engine',
                    Cpu=512,
                    MemoryReservation=1536,
                    Essential=True,
                    Image=ImportValue(
                        Sub('${Environment}-${AnchoreEngineImage}')),
                    # 8228 = API, 8338 = webhooks/registry service
                    PortMappings=[
                        PortMapping(
                            ContainerPort=8228,
                            HostPort=8228,
                            Protocol='tcp',
                        ),
                        PortMapping(
                            ContainerPort=8338,
                            HostPort=8338,
                            Protocol='tcp',
                        ),
                    ],
                    DockerSecurityOptions=['apparmor:docker-default'],
                    Environment=[
                        Environment(Name='ANCHORE_HOST_ID',
                                    Value='anchore-engine'),
                        Environment(Name='ANCHORE_ENDPOINT_HOSTNAME',
                                    Value='anchore-engine'),
                        Environment(Name='ANCHORE_DB_HOST',
                                    Value='anchore-db'),
                        Environment(Name='ANCHORE_DB_PASSWORD',
                                    Value=Ref('AnchoreDBPassword')),
                        Environment(Name='AWS_DEFAULT_REGION',
                                    Value=Ref('AWS::Region')),
                        Environment(Name='region', Value=Ref('AWS::Region')),
                    ],
                    LogConfiguration=LogConfiguration(
                        LogDriver='awslogs',
                        Options={
                            "awslogs-group": Ref('EngineLogGroup'),
                            "awslogs-region": Ref('AWS::Region'),
                            "awslogs-stream-prefix":
                            Join('', ['anchore-engine', 'logs'])
                        }),
                    # legacy container link so the engine resolves anchore-db
                    Links=['anchore-db']),
                ContainerDefinition(
                    Name='anchore-db',
                    Hostname='anchore-db',
                    Cpu=512,
                    MemoryReservation=1536,
                    Essential=True,
                    Image=Ref('ArchoreDatabaseImage'),
                    PortMappings=[
                        PortMapping(
                            ContainerPort=5432,
                            HostPort=5432,
                            Protocol='tcp',
                        )
                    ],
                    DockerSecurityOptions=['apparmor:docker-default'],
                    MountPoints=[
                        MountPoint(ContainerPath=Ref('PGDATA'),
                                   SourceVolume='anchore_db_vol')
                    ],
                    Environment=[
                        Environment(Name='POSTGRES_PASSWORD',
                                    Value=Ref('AnchoreDBPassword')),
                        Environment(Name='PGDATA', Value=Ref('PGDATA')),
                        Environment(Name='AWS_DEFAULT_REGION',
                                    Value=Ref('AWS::Region')),
                        Environment(Name='region', Value=Ref('AWS::Region')),
                    ],
                    LogConfiguration=LogConfiguration(
                        LogDriver='awslogs',
                        Options={
                            "awslogs-group": Ref('DatabaseLogGroup'),
                            "awslogs-region": Ref('AWS::Region'),
                            "awslogs-stream-prefix":
                            Join('', ['anchore-db', 'logs'])
                        }))
            ]))
    return self.cfn_template
Description='A VPC subnet ID for the container.', )) cluster = t.add_resource(Cluster('Cluster')) task_definition = t.add_resource( TaskDefinition('TaskDefinition', RequiresCompatibilities=['FARGATE'], Cpu='256', Memory='512', NetworkMode='awsvpc', ContainerDefinitions=[ ContainerDefinition( Name='nginx', Image='nginx', Essential=True, PortMappings=[PortMapping(ContainerPort=80)]) ])) service = t.add_resource( Service( 'NginxService', Cluster=Ref(cluster), DesiredCount=1, TaskDefinition=Ref(task_definition), LaunchType='FARGATE', NetworkConfiguration=NetworkConfiguration( AwsvpcConfiguration=AwsvpcConfiguration(Subnets=[Ref('Subnet')])))) print(t.to_json())
RoleName="HelloWorldTaskExecutionRole")) ### ECS Task Definition ### resources["HelloWorldTaskDef"] = template.add_resource( TaskDefinition("HelloWorldTaskDef", ContainerDefinitions=[ ContainerDefinition( "HelloWorldWebContDef", Cpu=256, Image="andreaarduino/arduino-hello-world:web-v3", Memory=512, Name="hello-world-web", PortMappings=[ PortMapping("HelloWorldPortMapping", ContainerPort=80, HostPort=80, Protocol="tcp") ], ), ContainerDefinition( "HelloWorldAppContDef", Cpu=256, Image="andreaarduino/arduino-hello-world:app-v3", Memory=512, Name="hello-world-app", PortMappings=[ PortMapping("HelloWorldPortMapping", ContainerPort=8080, HostPort=8080, Protocol="tcp") ])