def _gen_log_config(self, config):
    """Build the LogConfiguration for this service.

    An explicit ``log_configuration`` mapping in *config* is used verbatim;
    otherwise an awslogs configuration is derived from the environment name,
    the service name and the region, honoring a ``log_group`` override from
    ``self.configuration``.
    """
    try:
        explicit = config['log_configuration']
    except KeyError:
        pass
    else:
        return LogConfiguration(**explicit)
    # Default log group: "<environment>-logs", unless overridden.
    fallback_group = '-'.join([self.environment, 'logs'])
    options = {
        'awslogs-stream-prefix': self.service_name,
        'awslogs-group': self.configuration.get('log_group', fallback_group),
        'awslogs-region': self.region,
    }
    return LogConfiguration(LogDriver="awslogs", Options=options)
def handle_firelens_options(service: ComposeService, logging_def: dict, set_cw_default: bool = False) -> LogConfiguration:
    """Translate a compose ``logging`` definition into an awsfirelens LogConfiguration.

    When *set_cw_default* is true and no ``options`` are provided, CloudWatch
    defaults derived from the service (group/stream names, region) are used.
    The fluentbit output plugin ``Name`` is inferred from the first matching
    destination option.
    """
    cw_defaults = {
        "region": Region,
        "auto_create_group": True,
        "log_group_name": service.logical_name,
        "log_stream_prefix": service.service_name,
    }
    fallback = cw_defaults if set_cw_default else NoValue
    options = set_else_none("options", logging_def, alt_value=fallback)
    # Order matters: the first key present decides the output plugin.
    destination_plugins = (
        ("delivery_stream", "kinesis_firehose"),
        ("log_group_name", "cloudwatch"),
        ("stream", "kinesis_streams"),
        ("bucket", "s3"),
    )
    for option_key, plugin_name in destination_plugins:
        if keyisset(option_key, options):
            options["Name"] = plugin_name
            break
    return LogConfiguration(LogDriver="awsfirelens", Options=options)
def define_xray_container():
    """
    Return the ContainerDefinition for the AWS X-Ray daemon side-car.

    :return: non-essential daemon container listening on UDP/2000, logging
        to the stack's shared awslogs group.
    """
    log_options = {
        "awslogs-group": Ref(LOG_GROUP_T),
        "awslogs-region": Ref("AWS::Region"),
        "awslogs-stream-prefix": "xray-daemon",
    }
    return ContainerDefinition(
        Image=Ref(XRAY_IMAGE),
        Name="xray-daemon",
        PortMappings=[
            PortMapping(ContainerPort=2000, Protocol="UDP", HostPort=2000)
        ],
        Cpu=32,
        Memory=256,
        MemoryReservation=256,
        # The daemon must not take the whole task down if it dies.
        Essential=False,
        LogConfiguration=LogConfiguration(
            LogDriver="awslogs", Options=log_options
        ),
    )
def _gen_log_config(self, service_name):
    """Return a default awslogs LogConfiguration for *service_name*,
    targeting the "<env>-logs" group in the configured region."""
    group_name = '-'.join([self.env, 'logs'])
    return LogConfiguration(
        LogDriver="awslogs",
        Options={
            'awslogs-stream-prefix': service_name,
            'awslogs-group': group_name,
            'awslogs-region': self.region,
        },
    )
def handle_awslogs_options(service: ComposeService, logging_def: dict) -> LogConfiguration:
    """Map a compose ``logging`` definition onto an awslogs LogConfiguration.

    Options absent from the definition fall back to service-derived defaults
    (group/prefix) or ``AWS::NoValue``. ``awslogs-create-group`` is normalized
    to a boolean whether declared as a bool or a yes/true string.
    """
    options_def = set_else_none("options", logging_def)
    # (option name, fallback) pairs; tuple order fixes the Options dict order.
    option_spec = (
        ("awslogs-group", service.logical_name),
        ("awslogs-region", Region),
        ("awslogs-stream-prefix", service.name),
        ("awslogs-endpoint", NoValue),
        ("awslogs-datetime-format", NoValue),
        ("awslogs-multiline-pattern", NoValue),
        ("mode", NoValue),
        ("max-buffer-size", NoValue),
    )
    options = {
        name: set_else_none(name, options_def, alt_value=fallback)
        for name, fallback in option_spec
    }
    if keypresent("awslogs-create-group", options_def):
        declared = options_def["awslogs-create-group"]
        if isinstance(declared, bool):
            options["awslogs-create-group"] = keyisset(
                "awslogs-create-group", options_def
            )
        elif isinstance(declared, str):
            options["awslogs-create-group"] = declared in [
                "yes",
                "true",
                "Yes",
                "True",
            ]
    return LogConfiguration(
        LogDriver="awslogs",
        Options=options,
    )
def set_update_log_configuration(self, **kwargs):
    """Set (or refresh) ``self.log_configuration``.

    A fully-specified override (both ``LogDriver`` and ``Options`` in
    *kwargs*) wins outright; otherwise the configuration is derived from the
    service's declared log driver, optionally swapping awslogs for the
    firelens equivalent.
    """
    fully_specified = (
        kwargs and keyisset("LogDriver", kwargs) and keyisset("Options", kwargs)
    )
    if fully_specified:
        self.log_configuration = LogConfiguration(**kwargs)
        return
    if self.log_driver == "awslogs":
        derived = handle_awslogs_options(self.service, self.log_config)
        if self.replace_cw_with_firelens:
            derived = replace_awslogs_with_firelens_configuration(
                self.service, derived
            )
        self.log_configuration = derived
    elif self.log_driver == "awsfirelens":
        self.log_configuration = handle_firelens_options(
            self.service, self.log_config
        )
def replace_awslogs_with_firelens_configuration(
    service: ComposeService, awslogs_config: LogConfiguration) -> LogConfiguration:
    """
    Remap awslogs driver options onto the fluentbit ``cloudwatch`` plugin
    options, filling in service-derived defaults for anything unset.

    :param ComposeService service:
    :param LogConfiguration awslogs_config:
    :return: equivalent awsfirelens LogConfiguration
    """
    option_remap = (
        ("awslogs-group", "log_group_name"),
        ("awslogs-stream-prefix", "log_stream_prefix"),
        ("awslogs-endpoint", "endpoint"),
        ("awslogs-region", "region"),
        ("awslogs-create-group", "auto_create_group"),
    )
    source_options = awslogs_config.Options
    firelens_options: dict = {"Name": "cloudwatch"}
    for awslogs_key, fluentbit_key in option_remap:
        if not keyisset(awslogs_key, source_options):
            continue
        value = source_options[awslogs_key]
        # AWS::NoValue placeholders carry no information — skip them.
        if isinstance(value, Ref) and value == NoValue:
            continue
        if value:
            firelens_options[fluentbit_key] = value
    if not keyisset("log_group_name", firelens_options):
        firelens_options["log_group_name"] = f"ecs/svc/{service.logical_name}"
    if not keyisset("log_stream_prefix", firelens_options):
        firelens_options["log_stream_prefix"] = service.name
    if not keypresent("auto_create_group", firelens_options):
        firelens_options["auto_create_group"] = True
    return LogConfiguration(LogDriver="awsfirelens", Options=firelens_options)
def build_template(sierrafile):
    """Build the complete CloudFormation template for a sierrafile deployment.

    One template containing: the network (VPC, two public subnets, routing,
    a network load balancer), an ECS cluster backed by an EC2 Auto Scaling
    group, and — per declared service — a task definition, NLB listener,
    target group and ECS service, plus (when enabled) a CodePipeline with
    CodeBuild and a GitHub webhook.

    :param sierrafile: parsed configuration exposing ``extra_params``,
        ``env_vars`` and ``services``.
    :return: a troposphere ``Template``.
    """
    template = Template()
    template.add_version('2010-09-09')
    template.add_metadata(build_interface(sierrafile.extra_params))

    parameters = AttrDict(

        # Network Parameters
        vpc_cidr=template.add_parameter(Parameter(
            'VpcCidr',
            Type='String',
            Default='192.172.0.0/16',
        )),
        subnet1_cidr=template.add_parameter(Parameter(
            'Subnet1Cidr',
            Type='String',
            Default='192.172.1.0/24',
        )),
        subnet2_cidr=template.add_parameter(Parameter(
            'Subnet2Cidr',
            Type='String',
            Default='192.172.2.0/24',
        )),

        # ECS Parameters
        cluster_size=template.add_parameter(Parameter(
            'ClusterSize',
            Type='Number',
            Default=2,
        )),
        instance_type=template.add_parameter(Parameter(
            'InstanceType',
            Type='String',
            Default='t2.medium'
        )),
        key_name=template.add_parameter(Parameter(
            'KeyName',
            Type='AWS::EC2::KeyPair::KeyName',
        )),
        image_id=template.add_parameter(Parameter(
            'ImageId',
            Type='AWS::SSM::Parameter::Value<AWS::EC2::Image::Id>',
            Default=(
                '/aws/service/ecs/optimized-ami'
                '/amazon-linux/recommended/image_id'
            ),
            Description=(
                'An SSM parameter that resolves to a valid AMI ID.'
                ' This is the AMI that will be used to create ECS hosts.'
                ' The default is the current recommended ECS-optimized AMI.'
            )
        )),

        # Other Parameters
        github_token=template.add_parameter(Parameter(
            'GitHubToken',
            Type='String',
            NoEcho=True,
        )),
    )

    # Environment Variable Parameters
    # One NoEcho String parameter per extra env var declared in the
    # sierrafile (the name element of each pair is unused here).
    for env_var_param, env_var_name in sierrafile.extra_params:
        template.add_parameter(Parameter(
            env_var_param,
            Type='String',
            NoEcho=True,
        ))

    # Resource Declarations

    # Network

    network_vpc = template.add_resource(VPC(
        'NetworkVpc',
        CidrBlock=Ref(parameters.vpc_cidr),
        Tags=Tags(Name=Ref('AWS::StackName')),
    ))

    network_ig = template.add_resource(InternetGateway(
        'NetworkInternetGateway',
        Tags=Tags(Name=Ref('AWS::StackName')),
    ))

    vpc_attach = template.add_resource(VPCGatewayAttachment(
        'NetworkInternetGatewayAttachment',
        InternetGatewayId=Ref(network_ig),
        VpcId=Ref(network_vpc),
    ))

    route_table = template.add_resource(RouteTable(
        'NetworkRouteTable',
        VpcId=Ref(network_vpc),
        Tags=Tags(Name=Ref('AWS::StackName')),
    ))

    # Default route to the internet; must wait for the gateway attachment.
    template.add_resource(Route(
        'NetworkDefaultRoute',
        DependsOn=[vpc_attach.title],
        RouteTableId=Ref(route_table),
        DestinationCidrBlock='0.0.0.0/0',
        GatewayId=Ref(network_ig),
    ))

    subnet1 = template.add_resource(Subnet(
        'NetworkSubnet1',
        VpcId=Ref(network_vpc),
        AvailabilityZone=Select(0, GetAZs()),
        MapPublicIpOnLaunch=True,
        CidrBlock=Ref(parameters.subnet1_cidr),
        Tags=Tags(Name=Sub('${AWS::StackName} (Public)')),
    ))

    subnet2 = template.add_resource(Subnet(
        'NetworkSubnet2',
        VpcId=Ref(network_vpc),
        AvailabilityZone=Select(1, GetAZs()),
        MapPublicIpOnLaunch=True,
        CidrBlock=Ref(parameters.subnet2_cidr),
        Tags=Tags(Name=Sub('${AWS::StackName} (Public)')),
    ))

    template.add_resource(SubnetRouteTableAssociation(
        'NetworkSubnet1RouteTableAssociation',
        RouteTableId=Ref(route_table),
        SubnetId=Ref(subnet1),
    ))

    template.add_resource(SubnetRouteTableAssociation(
        'NetworkSubnet2RouteTableAssociation',
        RouteTableId=Ref(route_table),
        SubnetId=Ref(subnet2),
    ))

    # Network load balancer shared by all services (one listener each).
    elb = template.add_resource(LoadBalancer(
        ELB_NAME,
        Name=Sub('${AWS::StackName}-elb'),
        Type='network',
        Subnets=[Ref(subnet1), Ref(subnet2)],
    ))

    # Cluster

    ecs_host_role = template.add_resource(Role(
        'EcsHostRole',
        AssumeRolePolicyDocument=PolicyDocument(
            Statement=[Statement(
                Effect=Allow,
                Principal=Principal('Service', 'ec2.amazonaws.com'),
                Action=[awacs.sts.AssumeRole]
            )],
        ),
        ManagedPolicyArns=[
            'arn:aws:iam::aws:policy/'
            'service-role/AmazonEC2ContainerServiceforEC2Role'
        ]
    ))

    ecs_host_profile = template.add_resource(InstanceProfile(
        'EcsHostInstanceProfile',
        Roles=[Ref(ecs_host_role)]
    ))

    # NOTE(review): ingress open to 0.0.0.0/0 on all protocols — presumably
    # intentional for this sample, but worth confirming.
    ecs_host_sg = template.add_resource(SecurityGroup(
        'EcsHostSecurityGroup',
        GroupDescription=Sub('${AWS::StackName}-hosts'),
        VpcId=Ref(network_vpc),
        SecurityGroupIngress=[SecurityGroupRule(
            CidrIp='0.0.0.0/0',
            IpProtocol='-1'
        )]
    ))

    cluster = template.add_resource(Cluster(
        'EcsCluster',
        ClusterName=Ref('AWS::StackName')
    ))

    # Logical names are needed as plain strings inside the cfn-init/cfn-signal
    # user-data below, so they are bound before the resources themselves.
    autoscaling_name = 'EcsHostAutoScalingGroup'

    launch_conf_name = 'EcsHostLaunchConfiguration'
    launch_conf = template.add_resource(LaunchConfiguration(
        launch_conf_name,
        ImageId=Ref(parameters.image_id),
        InstanceType=Ref(parameters.instance_type),
        IamInstanceProfile=Ref(ecs_host_profile),
        KeyName=Ref(parameters.key_name),
        SecurityGroups=[Ref(ecs_host_sg)],
        UserData=Base64(Sub(
            '#!/bin/bash\n'
            'yum install -y aws-cfn-bootstrap\n'
            '/opt/aws/bin/cfn-init -v'
            ' --region ${AWS::Region}'
            ' --stack ${AWS::StackName}'
            f' --resource {launch_conf_name}\n'
            '/opt/aws/bin/cfn-signal -e $?'
            ' --region ${AWS::Region}'
            ' --stack ${AWS::StackName}'
            f' --resource {autoscaling_name}\n'
        )),
        Metadata={
            'AWS::CloudFormation::Init': {
                'config': {
                    'commands': {
                        # Register the host with the stack's ECS cluster.
                        '01_add_instance_to_cluster': {
                            'command': Sub(
                                f'echo ECS_CLUSTER=${{{cluster.title}}}'
                                f' > /etc/ecs/ecs.config'
                            ),
                        }
                    },
                    'files': {
                        '/etc/cfn/cfn-hup.conf': {
                            'mode': 0o400,
                            'owner': 'root',
                            'group': 'root',
                            'content': Sub(
                                '[main]\n'
                                'stack=${AWS::StackId}\n'
                                'region=${AWS::Region}\n'
                            ),
                        },
                        '/etc/cfn/hooks.d/cfn-auto-reloader.conf': {
                            'content': Sub(
                                '[cfn-auto-reloader-hook]\n'
                                'triggers=post.update\n'
                                'path=Resources.ContainerInstances.Metadata'
                                '.AWS::CloudFormation::Init\n'
                                'action=/opt/aws/bin/cfn-init -v'
                                ' --region ${AWS::Region}'
                                ' --stack ${AWS::StackName}'
                                f' --resource {launch_conf_name}\n'
                            ),
                        },
                    },
                    'services': {
                        'sysvinit': {
                            'cfn-hup': {
                                'enabled': True,
                                'ensureRunning': True,
                                'files': [
                                    '/etc/cfn/cfn-hup.conf',
                                    '/etc/cfn/hooks.d/cfn-auto-reloader.conf'
                                ]
                            }
                        }
                    }
                }
            }
        }
    ))

    # Fixed-size group (min == max == desired); rolling updates one at a time.
    autoscaling_group = template.add_resource(AutoScalingGroup(
        autoscaling_name,
        VPCZoneIdentifier=[Ref(subnet1), Ref(subnet2)],
        LaunchConfigurationName=Ref(launch_conf),
        DesiredCapacity=Ref(parameters.cluster_size),
        MinSize=Ref(parameters.cluster_size),
        MaxSize=Ref(parameters.cluster_size),
        Tags=[{
            'Key': 'Name',
            'Value': Sub('${AWS::StackName} - ECS Host'),
            'PropagateAtLaunch': True,
        }],
        CreationPolicy=CreationPolicy(
            ResourceSignal=ResourceSignal(Timeout='PT15M'),
        ),
        UpdatePolicy=UpdatePolicy(
            AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                MinInstancesInService=1,
                MaxBatchSize=1,
                PauseTime='PT5M',
                WaitOnResourceSignals=True,
            ),
        ),
    ))

    # Services

    task_role = template.add_resource(Role(
        'TaskExecutionRole',
        AssumeRolePolicyDocument=PolicyDocument(
            Statement=[Statement(
                Effect=Allow,
                Principal=Principal('Service', 'ecs-tasks.amazonaws.com'),
                Action=[awacs.sts.AssumeRole],
            )],
        ),
        ManagedPolicyArns=[
            'arn:aws:iam::aws:policy/'
            'service-role/AmazonECSTaskExecutionRolePolicy'
        ],
    ))

    # Pipeline artifacts survive stack deletion.
    artifact_bucket = template.add_resource(Bucket(
        'ArtifactBucket',
        DeletionPolicy='Retain',
    ))

    codebuild_role = template.add_resource(Role(
        'CodeBuildServiceRole',
        Path='/',
        AssumeRolePolicyDocument=PolicyDocument(
            Version='2012-10-17',
            Statement=[
                Statement(
                    Effect=Allow,
                    Principal=Principal(
                        'Service', 'codebuild.amazonaws.com'
                    ),
                    Action=[
                        awacs.sts.AssumeRole,
                    ],
                ),
            ],
        ),
        Policies=[Policy(
            PolicyName='root',
            PolicyDocument=PolicyDocument(
                Version='2012-10-17',
                Statement=[
                    Statement(
                        Resource=['*'],
                        Effect=Allow,
                        Action=[
                            awacs.ssm.GetParameters,
                        ],
                    ),
                    Statement(
                        Resource=['*'],
                        Effect=Allow,
                        Action=[
                            awacs.s3.GetObject,
                            awacs.s3.PutObject,
                            awacs.s3.GetObjectVersion,
                        ],
                    ),
                    Statement(
                        Resource=['*'],
                        Effect=Allow,
                        Action=[
                            awacs.logs.CreateLogGroup,
                            awacs.logs.CreateLogStream,
                            awacs.logs.PutLogEvents,
                        ],
                    ),
                ],
            ),
        )],
    ))

    codepipeline_role = template.add_resource(Role(
        'CodePipelineServiceRole',
        Path='/',
        AssumeRolePolicyDocument=PolicyDocument(
            Version='2012-10-17',
            Statement=[
                Statement(
                    Effect=Allow,
                    Principal=Principal(
                        'Service', 'codepipeline.amazonaws.com'
                    ),
                    Action=[
                        awacs.sts.AssumeRole,
                    ],
                ),
            ],
        ),
        Policies=[Policy(
            PolicyName='root',
            PolicyDocument=PolicyDocument(
                Version='2012-10-17',
                Statement=[
                    Statement(
                        Resource=[
                            Sub(f'${{{artifact_bucket.title}.Arn}}/*')
                        ],
                        Effect=Allow,
                        Action=[
                            awacs.s3.GetBucketVersioning,
                            awacs.s3.GetObject,
                            awacs.s3.GetObjectVersion,
                            awacs.s3.PutObject,
                        ],
                    ),
                    Statement(
                        Resource=['*'],
                        Effect=Allow,
                        Action=[
                            awacs.ecs.DescribeServices,
                            awacs.ecs.DescribeTaskDefinition,
                            awacs.ecs.DescribeTasks,
                            awacs.ecs.ListTasks,
                            awacs.ecs.RegisterTaskDefinition,
                            awacs.ecs.UpdateService,
                            awacs.codebuild.StartBuild,
                            awacs.codebuild.BatchGetBuilds,
                            awacs.iam.PassRole,
                        ],
                    ),
                ],
            ),
        )],
    ))

    # Single log group shared by all service containers.
    log_group = template.add_resource(LogGroup(
        'LogGroup',
        LogGroupName=Sub('/ecs/${AWS::StackName}'),
    ))

    # One CodeBuild project shared by every pipeline-enabled service.
    if any(conf.pipeline.enable for conf in sierrafile.services.values()):
        project = template.add_resource(Project(
            'CodeBuildProject',
            Name=Sub('${AWS::StackName}-build'),
            ServiceRole=Ref(codebuild_role),
            Artifacts=Artifacts(Type='CODEPIPELINE'),
            Source=Source(Type='CODEPIPELINE'),
            Environment=Environment(
                ComputeType='BUILD_GENERAL1_SMALL',
                Image='aws/codebuild/docker:17.09.0',
                Type='LINUX_CONTAINER',
            ),
        ))

    # Per-service resources: task definition, target group, listener,
    # service, and (optionally) pipeline + webhook.
    for name, settings in sierrafile.services.items():
        task_definition = template.add_resource(TaskDefinition(
            f'{name}TaskDefinition',
            RequiresCompatibilities=['EC2'],
            Cpu=str(settings.container.cpu),
            Memory=str(settings.container.memory),
            NetworkMode='bridge',
            ExecutionRoleArn=Ref(task_role.title),
            ContainerDefinitions=[
                ContainerDefinition(
                    Name=f'{name}',
                    Image=settings.container.image,
                    Memory=str(settings.container.memory),
                    Essential=True,
                    PortMappings=[
                        PortMapping(
                            ContainerPort=settings.container.port,
                            Protocol='tcp',
                        ),
                    ],
                    # Only env vars the service explicitly opted into.
                    Environment=[
                        troposphere.ecs.Environment(Name=k, Value=v)
                        for k, v in sierrafile.env_vars.items()
                        if k in settings.get('environment', [])
                    ],
                    LogConfiguration=LogConfiguration(
                        LogDriver='awslogs',
                        Options={
                            'awslogs-region': Ref('AWS::Region'),
                            'awslogs-group': Ref(log_group.title),
                            'awslogs-stream-prefix': Ref('AWS::StackName'),
                        },
                    ),
                ),
            ],
        ))

        target_group = template.add_resource(TargetGroup(
            f'{name}TargetGroup',
            Port=settings.container.port,
            Protocol='TCP',
            VpcId=Ref(network_vpc),
            Tags=Tags(Name=Sub(f'${{AWS::StackName}}-{name}')),
        ))

        listener = template.add_resource(Listener(
            f'{name}ElbListener',
            LoadBalancerArn=Ref(elb),
            Port=settings.container.port,
            Protocol='TCP',
            DefaultActions=[
                Action(TargetGroupArn=Ref(target_group), Type='forward')
            ],
        ))

        service = template.add_resource(Service(
            f'{name}Service',
            Cluster=Ref(cluster),
            ServiceName=f'{name}-service',
            DependsOn=[autoscaling_group.title, listener.title],
            DesiredCount=settings.container.count,
            TaskDefinition=Ref(task_definition),
            LaunchType='EC2',
            LoadBalancers=[
                troposphere.ecs.LoadBalancer(
                    ContainerName=f'{name}',
                    ContainerPort=settings.container.port,
                    TargetGroupArn=Ref(target_group),
                ),
            ],
        ))

        if settings.pipeline.enable:
            # GitHub source -> shared CodeBuild project -> ECS deploy.
            pipeline = template.add_resource(Pipeline(
                f'{name}Pipeline',
                RoleArn=GetAtt(codepipeline_role, 'Arn'),
                ArtifactStore=ArtifactStore(
                    Type='S3',
                    Location=Ref(artifact_bucket),
                ),
                Stages=[
                    Stages(
                        Name='Source',
                        Actions=[Actions(
                            Name='Source',
                            ActionTypeId=ActionTypeId(
                                Category='Source',
                                Owner='ThirdParty',
                                Version='1',
                                Provider='GitHub',
                            ),
                            OutputArtifacts=[
                                OutputArtifacts(Name=f'{name}Source'),
                            ],
                            RunOrder='1',
                            Configuration={
                                'Owner': settings.pipeline.user,
                                'Repo': settings.pipeline.repo,
                                'Branch': settings.pipeline.branch,
                                'OAuthToken': Ref(parameters.github_token),
                            },
                        )],
                    ),
                    Stages(
                        Name='Build',
                        Actions=[Actions(
                            Name='Build',
                            ActionTypeId=ActionTypeId(
                                Category='Build',
                                Owner='AWS',
                                Version='1',
                                Provider='CodeBuild',
                            ),
                            InputArtifacts=[
                                InputArtifacts(Name=f'{name}Source'),
                            ],
                            OutputArtifacts=[
                                OutputArtifacts(Name=f'{name}Build'),
                            ],
                            RunOrder='1',
                            Configuration={
                                'ProjectName': Ref(project),
                            },
                        )],
                    ),
                    Stages(
                        Name='Deploy',
                        Actions=[Actions(
                            Name='Deploy',
                            ActionTypeId=ActionTypeId(
                                Category='Deploy',
                                Owner='AWS',
                                Version='1',
                                Provider='ECS',
                            ),
                            InputArtifacts=[
                                InputArtifacts(Name=f'{name}Build')
                            ],
                            RunOrder='1',
                            Configuration={
                                'ClusterName': Ref(cluster),
                                'ServiceName': Ref(service),
                                'FileName': 'image.json',
                            },
                        )],
                    ),
                ],
            ))

            # Push-triggered start of the pipeline's Source action.
            template.add_resource(Webhook(
                f'{name}CodePipelineWebhook',
                Name=Sub(f'${{AWS::StackName}}-{name}-webhook'),
                Authentication='GITHUB_HMAC',
                AuthenticationConfiguration=AuthenticationConfiguration(
                    SecretToken=Ref(parameters.github_token),
                ),
                Filters=[FilterRule(
                    JsonPath='$.ref',
                    MatchEquals=f'refs/heads/{settings.pipeline.branch}'
                )],
                TargetAction='Source',
                TargetPipeline=Ref(pipeline),
                TargetPipelineVersion=1,
                RegisterWithThirdParty=True,
            ))

    return template
".amazonaws.com/", Ref(repository), ":", app_revision, ]), PortMappings=[ PortMapping( ContainerPort=web_worker_port, HostPort=web_worker_port, ) ], LogConfiguration=LogConfiguration(LogDriver="awslogs", Options={ 'awslogs-group': Ref(container_log_group), 'awslogs-region': Ref(AWS_REGION), 'awslogs-stream-prefix': Ref(AWS_STACK_NAME), }), Environment=[ Environment(Name=k, Value=v) for k, v in environment_variables ] + [ Environment(Name="PORT", Value=web_worker_port), ], ) ], ) app_service_role = iam.Role( "AppServiceRole",
def add_ecs_task(self):
    '''
    Add the Anchore ECS task definition (engine + postgres database
    containers, linked, sharing one volume for the DB data directory)
    to the CFN template.

    :return: the updated troposphere template
    '''
    # Idiom fix: plain int/bool literals instead of int('512'), bool('true')
    # etc. — the values are identical, the wrappers added nothing.
    self.cfn_template.add_resource(
        TaskDefinition(
            title=constants.TASK,
            Volumes=[Volume(Name='anchore_db_vol')],
            TaskRoleArn=GetAtt(constants.TASK_ROLE, 'Arn'),
            ContainerDefinitions=[
                ContainerDefinition(
                    Name='anchore-engine',
                    Hostname='anchore-engine',
                    Cpu=512,
                    MemoryReservation=1536,
                    Essential=True,
                    Image=ImportValue(
                        Sub('${Environment}-${AnchoreEngineImage}')),
                    PortMappings=[
                        PortMapping(
                            ContainerPort=8228,
                            HostPort=8228,
                            Protocol='tcp',
                        ),
                        PortMapping(
                            ContainerPort=8338,
                            HostPort=8338,
                            Protocol='tcp',
                        ),
                    ],
                    DockerSecurityOptions=['apparmor:docker-default'],
                    Environment=[
                        Environment(Name='ANCHORE_HOST_ID',
                                    Value='anchore-engine'),
                        Environment(Name='ANCHORE_ENDPOINT_HOSTNAME',
                                    Value='anchore-engine'),
                        Environment(Name='ANCHORE_DB_HOST',
                                    Value='anchore-db'),
                        Environment(Name='ANCHORE_DB_PASSWORD',
                                    Value=Ref('AnchoreDBPassword')),
                        Environment(Name='AWS_DEFAULT_REGION',
                                    Value=Ref('AWS::Region')),
                        Environment(Name='region', Value=Ref('AWS::Region')),
                    ],
                    LogConfiguration=LogConfiguration(
                        LogDriver='awslogs',
                        Options={
                            "awslogs-group": Ref('EngineLogGroup'),
                            "awslogs-region": Ref('AWS::Region'),
                            "awslogs-stream-prefix": Join(
                                '', ['anchore-engine', 'logs'])
                        }),
                    # Legacy docker link so the engine can reach the DB by name.
                    Links=['anchore-db']),
                ContainerDefinition(
                    Name='anchore-db',
                    Hostname='anchore-db',
                    Cpu=512,
                    MemoryReservation=1536,
                    Essential=True,
                    Image=Ref('ArchoreDatabaseImage'),
                    PortMappings=[
                        PortMapping(
                            ContainerPort=5432,
                            HostPort=5432,
                            Protocol='tcp',
                        )
                    ],
                    DockerSecurityOptions=['apparmor:docker-default'],
                    # Postgres data dir persisted on the shared task volume.
                    MountPoints=[
                        MountPoint(ContainerPath=Ref('PGDATA'),
                                   SourceVolume='anchore_db_vol')
                    ],
                    Environment=[
                        Environment(Name='POSTGRES_PASSWORD',
                                    Value=Ref('AnchoreDBPassword')),
                        Environment(Name='PGDATA', Value=Ref('PGDATA')),
                        Environment(Name='AWS_DEFAULT_REGION',
                                    Value=Ref('AWS::Region')),
                        Environment(Name='region', Value=Ref('AWS::Region')),
                    ],
                    LogConfiguration=LogConfiguration(
                        LogDriver='awslogs',
                        Options={
                            "awslogs-group": Ref('DatabaseLogGroup'),
                            "awslogs-region": Ref('AWS::Region'),
                            "awslogs-stream-prefix": Join(
                                '', ['anchore-db', 'logs'])
                        }))
            ]))
    return self.cfn_template
web_log_group = logs.LogGroup( "WebLogs", template=template, RetentionInDays=365, DeletionPolicy="Retain", ) template.add_output( Output("WebLogsGroup", Description="Web application log group", Value=GetAtt(web_log_group, "Arn"))) log_configuration = LogConfiguration(LogDriver="awslogs", Options={ 'awslogs-group': Ref(web_log_group), 'awslogs-region': Ref(AWS_REGION), }) # ECS task web_task_definition = TaskDefinition( "WebTask", template=template, Condition=deploy_condition, ContainerDefinitions=[ ContainerDefinition( Name="WebWorker", # 1024 is full CPU Cpu=web_worker_cpu, Memory=web_worker_memory, Essential=True,
Name='database__connection__host', Value=ImportValue( Sub("${DependencyStackName}-GhostDBHost")), ), Environment(Name='database__connection__user', Value='ghost'), Environment(Name='database__connection__database', Value='ghost'), Environment(Name='AWSREGION', Value=Ref('AWS::Region')) ], LogConfiguration=LogConfiguration( LogDriver='awslogs', Options={ 'awslogs-group': ImportValue( Sub("${DependencyStackName}-GhostLogGroupName")), 'awslogs-region': Ref('AWS::Region'), 'awslogs-stream-prefix': 'ghost' })) ])) ghost_service = t.add_resource( Service('GhostService', Cluster=Ref(cluster), DesiredCount=1, TaskDefinition=Ref(ghost_task_definition), LaunchType='FARGATE', LoadBalancers=[ LoadBalancer(ContainerName='ghost',
def __init__(self, template, title, definition, config):
    """
    :param troposphere.Template template: template to add the container definition to
    :param str title: name of the resource / service
    :param dict definition: service definition
    :param ServiceConfig config: service configuration
    :raises AttributeError: when a required service key is missing
    """
    if not set(self.required_keys).issubset(set(definition)):
        raise AttributeError(
            "Required attributes for a ecs_service are", self.required_keys
        )
    image_param = Parameter(
        f"{title}ImageUrl",
        Type="String",
        Description=f"ImageURL for {title}",
    )
    add_parameters(template, [image_param])
    self.stack_parameters = {image_param.title: definition["image"]}
    # Prefer the explicit CPU allocation; fall back to the reservation when
    # the allocation is unresolved (a Ref); otherwise omit the property.
    if isinstance(config.cpu_alloc, int):
        cpu_config = config.cpu_alloc
    elif isinstance(config.cpu_alloc, Ref) and isinstance(config.cpu_resa, int):
        cpu_config = config.cpu_resa
    else:
        cpu_config = Ref(AWS_NO_VALUE)
    self.definition = ContainerDefinition(
        f"{title}Container",
        Image=Ref(image_param),
        Name=title,
        Cpu=cpu_config,
        Memory=config.mem_alloc,
        MemoryReservation=config.mem_resa,
        PortMappings=[
            PortMapping(ContainerPort=port, HostPort=port)
            for port in config.ingress_mappings.keys()
        ]
        if keyisset("ports", definition)
        else Ref(AWS_NO_VALUE),
        Environment=import_env_variables(definition["environment"])
        if keyisset("environment", definition)
        else Ref(AWS_NO_VALUE),
        LogConfiguration=LogConfiguration(
            LogDriver="awslogs",
            Options={
                "awslogs-group": Ref(ecs_params.LOG_GROUP_T),
                "awslogs-region": Ref("AWS::Region"),
                "awslogs-stream-prefix": title,
            },
        ),
        Command=definition["command"].strip().split(";")
        if keyisset("command", definition)
        else Ref(AWS_NO_VALUE),
        DependsOn=[ContainerDependency(**args) for args in config.family_dependents]
        if config.family_dependents
        else Ref(AWS_NO_VALUE),
        Essential=config.essential,
        HealthCheck=config.healthcheck
        if isinstance(config.healthcheck, HealthCheck)
        else Ref(AWS_NO_VALUE),
    )
    # Outputs: only values that resolved to concrete ints are emitted.
    values = []
    if isinstance(config.cpu_resa, int):
        values.append(("Cpu", "Cpu", str(config.cpu_resa)))
    # BUGFIX: the Memory output was gated on config.cpu_resa (copy-paste
    # from the line above) although it emits config.mem_alloc; each output
    # must be gated on the value it reports, like Cpu and MemoryReservation.
    if isinstance(config.mem_alloc, int):
        values.append(("Memory", "Memory", str(config.mem_alloc)))
    if isinstance(config.mem_resa, int):
        values.append(
            ("MemoryReservation", "MemoryReservation", str(config.mem_resa))
        )
    template.add_output(ComposeXOutput(title, values, export=False).outputs)
def generate_template(d):
    """Generate the CloudFormation template for one Fargate service.

    Builds the ECS task definition and service, its ECR repository, ALB
    target group and listener rule, plus a CodeBuild project and a
    CodePipeline (S3 source -> CodeBuild -> ECS deploy).

    :param dict d: flat settings dict (env / project_name / service_name,
        container sizing and command, exported stack names, artifact store).
    :return: troposphere Template
    """
    # Set template metadata
    t = Template()
    t.add_version("2010-09-09")
    t.set_description(d["cf_template_description"])

    aws_account_id = Ref("AWS::AccountId")
    aws_region = Ref("AWS::Region")

    # Task definition
    task_definition = t.add_resource(
        TaskDefinition(
            "TaskDefinition",
            Family=Join(
                "", [d["env"], "-", d["project_name"], "-", d["service_name"]]),
            RequiresCompatibilities=["FARGATE"],
            Cpu=d["container_cpu"],
            Memory=d["container_memory"],
            NetworkMode="awsvpc",
            ExecutionRoleArn=ImportValue(d["ecs_stack_name"] +
                                         "-ECSClusterRole"),
            ContainerDefinitions=[
                ContainerDefinition(
                    Name=Join("", [
                        d["env"], "-", d["project_name"], "-", d["service_name"]
                    ]),
                    # BUGFIX: the image URI must reference the ECR repository
                    # created below, whose RepositoryName is
                    # "<env>-<project>-<service>" (also what CodeBuild's
                    # IMAGE_URI pushes to). The original Join omitted the "-"
                    # separators and therefore pointed at a nonexistent repo.
                    Image=Join(
                        "",
                        [
                            aws_account_id, ".dkr.ecr.", aws_region,
                            ".amazonaws.com/", d["env"], "-",
                            d["project_name"], "-", d["service_name"],
                            ":latest"
                        ],
                    ),
                    Essential=True,
                    PortMappings=[
                        PortMapping(
                            ContainerPort=d["container_port"],
                            HostPort=d["container_port"],
                        )
                    ],
                    EntryPoint=["sh", "-c"],
                    Command=[d["container_command"]],
                    LogConfiguration=LogConfiguration(
                        LogDriver="awslogs",
                        Options={
                            "awslogs-region": aws_region,
                            "awslogs-group": Join("", [
                                d["env"], "-", d["project_name"], "-",
                                d["service_name"]
                            ]),
                            "awslogs-stream-prefix": "ecs",
                            "awslogs-create-group": "true"
                        }))
            ],
            Tags=Tags(d["tags"],
                      {"Name": d["project_name"] + "-task-definition"}),
        ))

    # ECR
    ecr = t.add_resource(
        Repository(
            "ECR",
            DependsOn="ListenerRule",
            RepositoryName=Join(
                "", [d["env"], "-", d["project_name"], "-", d["service_name"]]),
            Tags=Tags(d["tags"], {"Name": d["project_name"] + "-ecr"}),
        ))

    # Target group
    target_group = t.add_resource(
        elb.TargetGroup(
            "TargetGroup",
            Name=Join("", [d["env"], "-", d["service_name"]]),
            HealthCheckIntervalSeconds="30",
            HealthCheckProtocol="HTTP",
            HealthCheckPort=d["container_port"],
            HealthCheckTimeoutSeconds="10",
            HealthyThresholdCount="4",
            HealthCheckPath=d["tg_health_check_path"],
            Matcher=elb.Matcher(HttpCode="200-299"),
            Port=d["container_port"],
            Protocol="HTTP",
            TargetType="ip",
            UnhealthyThresholdCount="3",
            VpcId=ImportValue(d["network_stack_name"] + "-VPCId"),
            # NOTE(review): Name tag says "-ecr" — looks copy-pasted from the
            # repository above; left as-is to preserve deployed tag values.
            Tags=Tags(d["tags"], {"Name": d["project_name"] + "-ecr"}),
        ))

    # Listener rule
    t.add_resource(
        elb.ListenerRule(
            "ListenerRule",
            DependsOn="TargetGroup",
            ListenerArn=ImportValue(d["ecs_stack_name"] + "-ListenerArnHTTP"),
            Conditions=[
                elb.Condition(Field="path-pattern",
                              Values=[d["application_path_api"]])
            ],
            Actions=[
                elb.Action(Type="forward", TargetGroupArn=Ref(target_group))
            ],
            Priority="1",
        ))

    # ECS service (waits for the pipeline so the first image exists).
    ecs_service = t.add_resource(
        Service(
            "ECSService",
            ServiceName=Join(
                "", [d["env"], "-", d["project_name"], "-", d["service_name"]]),
            DependsOn="pipeline",
            DesiredCount=d["container_desired_tasks_count"],
            TaskDefinition=Ref(task_definition),
            LaunchType="FARGATE",
            NetworkConfiguration=NetworkConfiguration(
                AwsvpcConfiguration=AwsvpcConfiguration(
                    Subnets=[
                        ImportValue(d["network_stack_name"] +
                                    "-PrivateSubnetId1"),
                        ImportValue(d["network_stack_name"] +
                                    "-PrivateSubnetId2"),
                    ],
                    SecurityGroups=[
                        ImportValue(d["ecs_stack_name"] + "-ECSClusterSG")
                    ],
                )),
            LoadBalancers=([
                LoadBalancer(
                    ContainerName=Join(
                        "",
                        [
                            d["env"], "-", d["project_name"], "-",
                            d["service_name"]
                        ],
                    ),
                    ContainerPort=d["container_port"],
                    TargetGroupArn=Ref(target_group),
                )
            ]),
            Cluster=ImportValue(d["ecs_stack_name"] + "-ECSClusterName"),
            Tags=Tags(d["tags"], {"Name": d["project_name"] + "-ecs-service"}),
        ))

    # App Autoscaling target
    # App Autoscaling policy

    # Codebuild project
    codebuild = t.add_resource(
        Project(
            "codebuild",
            Name=Join(
                "", [d["env"], "-", d["project_name"], "-", d["service_name"]]),
            DependsOn="ECR",
            ServiceRole=ImportValue(d["ecs_stack_name"] +
                                    "-CodebuildDeveloperRole"),
            Artifacts=Artifacts(
                Name="Build",
                Location=d["artifact_store"],
                Type="S3",
            ),
            Description="Build a docker image and send it to ecr",
            Source=Source(
                BuildSpec="buildspec.yml",
                Type="S3",
                Location=d["artifact_store"] + "/" + d["artifact_name"],
            ),
            Environment=Environment(
                ComputeType="BUILD_GENERAL1_SMALL",
                Image="aws/codebuild/standard:4.0",
                # Privileged mode is required for docker-in-docker builds.
                PrivilegedMode=True,
                Type="LINUX_CONTAINER",
                EnvironmentVariables=[
                    EnvironmentVariable(
                        Name="AWS_DEFAULT_REGION",
                        Type="PLAINTEXT",
                        Value=aws_region,
                    ),
                    EnvironmentVariable(
                        Name="SERVICE_NAME",
                        Type="PLAINTEXT",
                        Value=Join(
                            "",
                            [
                                d["env"], "-", d["project_name"], "-",
                                d["service_name"]
                            ],
                        ),
                    ),
                    EnvironmentVariable(
                        Name="IMAGE_URI",
                        Type="PLAINTEXT",
                        Value=Join(
                            "",
                            [
                                aws_account_id,
                                ".dkr.ecr.",
                                aws_region,
                                ".amazonaws.com/",
                                d["env"],
                                "-",
                                d["project_name"],
                                "-",
                                d["service_name"],
                            ],
                        ),
                    ),
                ],
            ),
            Tags=Tags(d["tags"], {"Name": d["project_name"] + "-codebuild"}),
        ))

    # Codepipeline: S3 source -> CodeBuild -> ECS deploy.
    pipeline = t.add_resource(
        Pipeline(
            "pipeline",
            Name=Join(
                "", [d["env"], "-", d["project_name"], "-", d["service_name"]]),
            RoleArn=ImportValue(d["ecs_stack_name"] + "-CodePipelineRole"),
            Stages=[
                Stages(
                    Name="Source",
                    Actions=[
                        Actions(
                            Name="Source",
                            ActionTypeId=ActionTypeId(
                                Category="Source",
                                Owner="AWS",
                                Version="1",
                                Provider="S3",
                            ),
                            OutputArtifacts=[
                                OutputArtifacts(Name="source_artifact")
                            ],
                            Configuration={
                                "S3Bucket": d["artifact_store"],
                                "S3ObjectKey": d["artifact_name"],
                            },
                            RunOrder="1",
                        )
                    ],
                ),
                Stages(
                    Name="Build",
                    Actions=[
                        Actions(
                            Name="Build",
                            InputArtifacts=[
                                InputArtifacts(Name="source_artifact")
                            ],
                            OutputArtifacts=[
                                OutputArtifacts(Name="build_artifact")
                            ],
                            ActionTypeId=ActionTypeId(
                                Category="Build",
                                Owner="AWS",
                                Version="1",
                                Provider="CodeBuild",
                            ),
                            Configuration={"ProjectName": Ref(codebuild)},
                            RunOrder="1",
                        )
                    ],
                ),
                Stages(
                    Name="Deploy",
                    Actions=[
                        Actions(
                            Name="Deploy",
                            InputArtifacts=[
                                InputArtifacts(Name="build_artifact")
                            ],
                            ActionTypeId=ActionTypeId(
                                Category="Deploy",
                                Owner="AWS",
                                Version="1",
                                Provider="ECS",
                            ),
                            Configuration={
                                "ClusterName":
                                ImportValue(d["ecs_stack_name"] +
                                            "-ECSClusterName"),
                                "ServiceName":
                                Join(
                                    "",
                                    [
                                        d["env"],
                                        "-",
                                        d["project_name"],
                                        "-",
                                        d["service_name"],
                                    ],
                                ),
                                "FileName": "definitions.json",
                            },
                        )
                    ],
                ),
            ],
            ArtifactStore=ArtifactStore(Type="S3",
                                        Location=d["artifact_store"]),
        ))

    # Route53
    # Outputs

    return t
Ref("AWS::Region"), ".amazonaws.com", "/", "helloworld", ":", Ref("Tag") ]), Memory=32, Cpu=256, Name="helloworld", PortMappings=[ ecs.PortMapping(ContainerPort=3000) ], Environment=[ Environment(Name='HELLOWORLD_VERSION', Value=Ref("Tag")) ], LogConfiguration=LogConfiguration( LogDriver="awslogs", Options={ 'awslogs-group': "/aws/ecs/helloworld", 'awslogs-region': Ref("AWS::Region"), })), ], )) t.add_resource( Role( "ServiceRole", AssumeRolePolicyDocument=Policy(Statement=[ Statement(Effect=Allow, Action=[AssumeRole], Principal=Principal("Service", ["ecs.amazonaws.com"])) ]), Path="/", ManagedPolicyArns=[
def __create_ecs():
    """Build the sample Fargate cluster/task/service template and write it
    to ``ecs.yml``."""
    tmpl = Template()
    desired_count_param = tmpl.add_parameter(
        parameter=Parameter(title='DesiredCount', Default=1, Type='Number'))
    cpu_param = tmpl.add_parameter(
        parameter=Parameter(title='Cpu', Default=256, Type='Number'))
    memory_param = tmpl.add_parameter(
        parameter=Parameter(title='Memory', Default=512, Type='Number'))
    ecs_cluster = tmpl.add_resource(resource=Cluster(title='SampleCluster', ))
    nginx_log_group = tmpl.add_resource(resource=LogGroup(
        title='SampleLogGroup', LogGroupName='/aws/ecs/sample'))
    container_name = 'sample-nginx'
    # Single-container task running stock nginx, logging to the group above.
    nginx_container = ContainerDefinition(
        Image='nginx:latest',
        Name=container_name,
        PortMappings=[
            PortMapping(ContainerPort=80, HostPort=80, Protocol='tcp')
        ],
        LogConfiguration=LogConfiguration(
            LogDriver='awslogs',
            Options={
                'awslogs-region': Ref('AWS::Region'),
                'awslogs-group': Ref(nginx_log_group),
                'awslogs-stream-prefix': 'nginx'
            }))
    task_def = tmpl.add_resource(resource=TaskDefinition(
        title='SampleTaskDefinition',
        Cpu=Ref(cpu_param),
        Family='sample-fargate-task',
        RequiresCompatibilities=['FARGATE'],
        Memory=Ref(memory_param),
        NetworkMode='awsvpc',
        ExecutionRoleArn=Sub(
            'arn:aws:iam::${AWS::AccountId}:role/ecsTaskExecutionRole'),
        ContainerDefinitions=[nginx_container]))
    # Public subnets + public IP so the sample is reachable without a NAT.
    vpc_config = AwsvpcConfiguration(
        AssignPublicIp='ENABLED',
        SecurityGroups=[ImportValue(ExportName.TASK_SECURITY_GROUP.value)],
        Subnets=[
            ImportValue(CommonResource.ExportName.PUBLIC_SUBNET_A_ID.value),
            ImportValue(CommonResource.ExportName.PUBLIC_SUBNET_B_ID.value),
        ])
    tmpl.add_resource(resource=Service(
        title='SampleService',
        ServiceName='sample-fargate',
        Cluster=Ref(ecs_cluster),
        DesiredCount=Ref(desired_count_param),
        TaskDefinition=Ref(task_def),
        LaunchType='FARGATE',
        NetworkConfiguration=NetworkConfiguration(
            AwsvpcConfiguration=vpc_config),
        LoadBalancers=[
            EcsLoadBalancer(
                ContainerName=container_name,
                ContainerPort=80,
                TargetGroupArn=ImportValue(ExportName.TARGET_GROUP.value))
        ]))
    output_template_file(tmpl, 'ecs.yml')
def add_envoy_container_definition(self):
    """
    Append the Envoy side-car container to the task and wire up the
    AppMesh proxy configuration, then refresh the task compute parameter.
    """
    sidecar_name = "envoy"
    task = self.stack.service.task
    # 15000/15001 are the proxy ingress/egress ports referenced again in the
    # ProxyConfiguration below.
    sidecar_ports = [
        PortMapping(ContainerPort=15000, HostPort=15000),
        PortMapping(ContainerPort=15001, HostPort=15001),
    ]
    sidecar_environment = [
        Environment(
            Name="APPMESH_VIRTUAL_NODE_NAME",
            Value=Sub(
                f"mesh/${{{appmesh_params.MESH_NAME.title}}}/virtualNode/${{{self.node.title}.VirtualNodeName}}"
            ),
        ),
        Environment(
            Name="ENABLE_ENVOY_XRAY_TRACING",
            Value="1" if task.family_config.use_xray else "0",
        ),
        Environment(Name="ENABLE_ENVOY_STATS_TAGS", Value="1"),
    ]
    sidecar_logging = LogConfiguration(
        LogDriver="awslogs",
        Options={
            "awslogs-group": Ref(ecs_params.LOG_GROUP_T),
            "awslogs-region": Ref("AWS::Region"),
            "awslogs-stream-prefix": sidecar_name,
        },
    )
    self.stack.stack_template.add_parameter(appmesh_params.ENVOY_IMAGE_URL)
    sidecar_container = ContainerDefinition(
        Image=Ref(appmesh_params.ENVOY_IMAGE_URL),
        Name=sidecar_name,
        Cpu="128",
        Memory="256",
        User="******",
        Essential=True,
        LogConfiguration=sidecar_logging,
        Environment=sidecar_environment,
        PortMappings=sidecar_ports,
        Ulimits=[Ulimit(HardLimit=15000, SoftLimit=15000, Name="nofile")],
        # Container is healthy once the Envoy admin endpoint reports LIVE.
        HealthCheck=HealthCheck(
            Command=[
                "CMD-SHELL",
                "curl -s http://localhost:9901/server_info | grep state | grep -q LIVE",
            ],
            Interval=5,
            Timeout=2,
            Retries=3,
            StartPeriod=10,
        ),
    )
    mesh_proxy = ProxyConfiguration(
        ContainerName="envoy",
        Type="APPMESH",
        ProxyConfigurationProperties=[
            Environment(Name="IgnoredUID", Value="1337"),
            Environment(Name="ProxyIngressPort", Value="15000"),
            Environment(Name="ProxyEgressPort", Value="15001"),
            Environment(Name="IgnoredGID", Value=""),
            Environment(
                Name="EgressIgnoredIPs", Value="169.254.170.2,169.254.169.254"
            ),
            Environment(Name="EgressIgnoredPorts", Value=""),
            Environment(
                Name="AppPorts",
                Value=",".join([f"{port.Port}" for port in self.port_mappings]),
            ),
        ],
    )
    task.containers.append(sidecar_container)
    setattr(task.definition, "ProxyConfiguration", mesh_proxy)
    task.set_task_compute_parameter()