def send_command(self, account_ctx, region, resource, parameters, targets, document_name):
    """Run an SSM document against the given targets and block until it finishes.

    Command output is streamed to the resource's 'paco_ssm' CloudWatch
    LogGroup. Completion is awaited via self.wait_for_command().
    """
    client = account_ctx.get_aws_client('ssm', aws_region=region)
    log_group = prefixed_name(resource, 'paco_ssm', self.paco_ctx.legacy_flag)
    command_kwargs = {
        'DocumentName': document_name,
        'Parameters': parameters,
        'Targets': targets,
        'CloudWatchOutputConfig': {
            'CloudWatchLogGroupName': log_group,
            'CloudWatchOutputEnabled': True,
        },
    }
    result = client.send_command(**command_kwargs)
    command_id = result['Command']['CommandId']
    self.wait_for_command(client, account_ctx, region, resource, command_id)
def command_update_ssm_agent(self, resource, account_ctx, region):
    """Send the AWS-UpdateSSMAgent document to all instances in the resource's stack.

    Fire-and-forget: unlike send_command(), this does not wait for the
    command to complete. Output is streamed to the resource's 'paco_ssm'
    CloudWatch LogGroup.
    """
    ssm_client = account_ctx.get_aws_client('ssm', aws_region=region)
    ssm_log_group_name = prefixed_name(resource, 'paco_ssm', self.paco_ctx.legacy_flag)
    # FIX: the response was previously assigned to an unused local; the
    # CommandId is intentionally discarded since we do not wait on it.
    ssm_client.send_command(
        Targets=[{
            'Key': 'tag:aws:cloudformation:stack-name',
            'Values': [resource.stack.get_name()]
        }],
        CloudWatchOutputConfig={
            'CloudWatchLogGroupName': ssm_log_group_name,
            'CloudWatchOutputEnabled': True,
        },
        DocumentName='AWS-UpdateSSMAgent',
    )
def paco_ec2lm_update_instance(self, resource, account_ctx, region, cache_id):
    """Kick off the 'paco_ec2lm_update_instance' SSM document on every
    instance that belongs to the resource's CloudFormation stack.

    Does not wait for completion; output goes to the resource's
    'paco_ssm' CloudWatch LogGroup.
    """
    client = account_ctx.get_aws_client('ssm', aws_region=region)
    log_group = prefixed_name(resource, 'paco_ssm', self.paco_ctx.legacy_flag)
    stack_target = {
        'Key': 'tag:aws:cloudformation:stack-name',
        'Values': [resource.stack.get_name()],
    }
    client.send_command(
        Targets=[stack_target],
        DocumentName='paco_ec2lm_update_instance',
        Parameters={'CacheId': [cache_id]},
        CloudWatchOutputConfig={
            'CloudWatchLogGroupName': log_group,
            'CloudWatchOutputEnabled': True,
        },
    )
def stack_hook_delete_log_groups(self, hook, monitoring_config):
    """Stack hook: delete every CloudWatch LogGroup declared in the
    resource's monitoring configuration.

    Paco-managed LogGroups are deleted under their prefixed name;
    external LogGroups are deleted under their raw name. LogGroups
    that no longer exist are silently skipped.
    """
    logs_client = self.account_ctx.get_aws_client('logs', self.aws_region)
    for log_group in monitoring_config.log_sets.get_all_log_groups():
        log_group_name = log_group.get_full_log_group_name()
        if log_group.external_resource == False:
            log_group_name = prefixed_name(self.stack.resource, log_group_name, self.paco_ctx.legacy_flag)
        try:
            logs_client.delete_log_group(logGroupName=log_group_name)
        except ClientError as error:
            # Ignore groups that do not exist; report anything else.
            if error.response['Error']['Code'] != 'ResourceNotFoundException':
                print(f'ERROR: Unable to delete LogGroup: {log_group_name}: {error.response["Error"]["Code"]}')
def update_windows_cloudwatch_agent(self):
    """Grant the instance role CloudWatch Agent permissions and register a
    stack hook that updates the agent on the ASG's instances.

    Builds an IAM managed policy (as YAML) that allows metric puts plus
    LogGroup/LogStream access for every Windows log group, records the
    prefixed LogGroup names back into self.windows_log_groups, then adds
    a post create/update stack hook.
    """
    iam_policy_name = '-'.join([self.resource.name, 'cloudwatchagent'])
    policy_config_yaml = f"""
policy_name: '{iam_policy_name}'
enabled: true
statement:
  - effect: Allow
    resource: "*"
    action:
      - "cloudwatch:PutMetricData"
      - "autoscaling:Describe*"
      - "ec2:DescribeTags"
"""
    policy_config_yaml += """      - "logs:CreateLogGroup"\n"""
    log_group_resources = ""
    log_stream_resources = ""
    for log_group_name in self.windows_log_groups.keys():
        lg_name = prefixed_name(self.resource, log_group_name, self.paco_ctx.legacy_flag)
        # remember the prefixed name for later use by the agent config
        self.windows_log_groups[log_group_name] = lg_name
        log_group_resources += "      - arn:aws:logs:{}:{}:log-group:{}:*\n".format(
            self.aws_region,
            self.account_ctx.id,
            lg_name,
        )
        log_stream_resources += "      - arn:aws:logs:{}:{}:log-group:{}:log-stream:*\n".format(
            self.aws_region,
            self.account_ctx.id,
            lg_name,
        )
    policy_config_yaml += f"""
  - effect: Allow
    action:
      - "logs:DescribeLogStreams"
      - "logs:DescribeLogGroups"
      - "logs:CreateLogStream"
    resource:
{log_group_resources}
  - effect: Allow
    action:
      - "logs:PutLogEvents"
    resource:
{log_stream_resources}
"""
    # FIX: removed dead local `policy_name = 'policy_ssm_cloudwatchagent'`;
    # add_managed_policy is called with policy_name='policy' below.
    iam_ctl = self.paco_ctx.get_controller('IAM')
    iam_ctl.add_managed_policy(
        role=self.resource.instance_iam_role,
        resource=self.resource,
        policy_name='policy',
        policy_config_yaml=policy_config_yaml,
        extra_ref_names=['ssm', 'cloudwatchagent'],
    )
    # TODO: Make this work with Linux too
    self.stack.hooks.add(
        name='UpdateCloudWatchAgent.' + self.resource.name,
        stack_action=['create', 'update'],
        stack_timing='post',
        hook_method=self.asg_hook_update_cloudwatch_agent,
        cache_method=self.asg_hook_update_cloudwatch_agent_cache,
        hook_arg=self.resource
    )
def update_windows_ssm_agent(self):
    """Grant the instance role the SSM Agent IAM permissions and register
    a stack hook that updates the SSM Agent on the ASG's instances.
    """
    iam_policy_name = '-'.join([self.resource.name, 'ssmagent-policy'])
    ssm_prefixed_name = prefixed_name(self.resource, 'paco_ssm', self.paco_ctx.legacy_flag)
    # allows instance to create a LogGroup with any name - this is a requirement of the SSM Agent
    # if you limit the resource to just the LogGroups names you want SSM to use, the agent will not work
    ssm_log_group_arn = f"arn:aws:logs:{self.aws_region}:{self.account_ctx.id}:log-group:*"
    ssm_log_stream_arn = f"arn:aws:logs:{self.aws_region}:{self.account_ctx.id}:log-group:{ssm_prefixed_name}:log-stream:*"
    # FIX: removed a duplicated `ssm:PutInventory` action from the first statement.
    policy_config_yaml = f"""
policy_name: '{iam_policy_name}'
enabled: true
statement:
  - effect: Allow
    action:
      - ssmmessages:CreateControlChannel
      - ssmmessages:CreateDataChannel
      - ssmmessages:OpenControlChannel
      - ssmmessages:OpenDataChannel
      - ec2messages:AcknowledgeMessage
      - ec2messages:DeleteMessage
      - ec2messages:FailMessage
      - ec2messages:GetEndpoint
      - ec2messages:GetMessages
      - ec2messages:SendReply
      - ssm:UpdateInstanceInformation
      - ssm:ListInstanceAssociations
      - ssm:DescribeInstanceProperties
      - ssm:DescribeDocumentParameters
      - ssm:PutInventory
      - ssm:GetDeployablePatchSnapshotForInstance
    resource:
      - '*'
  - effect: Allow
    action:
      - s3:GetEncryptionConfiguration
      - ssm:GetManifest
    resource:
      - '*'
  - effect: Allow
    action:
      - s3:GetObject
    resource:
      - 'arn:aws:s3:::aws-ssm-{self.aws_region}/*'
      - 'arn:aws:s3:::aws-windows-downloads-{self.aws_region}/*'
      - 'arn:aws:s3:::amazon-ssm-{self.aws_region}/*'
      - 'arn:aws:s3:::amazon-ssm-packages-{self.aws_region}/*'
      - 'arn:aws:s3:::{self.aws_region}-birdwatcher-prod/*'
      - 'arn:aws:s3:::patch-baseline-snapshot-{self.aws_region}/*'
  - effect: Allow
    action:
      - logs:CreateLogGroup
      - logs:CreateLogStream
      - logs:DescribeLogGroups
      - logs:DescribeLogStreams
    resource:
      - {ssm_log_group_arn}
  - effect: Allow
    action:
      - logs:PutLogEvents
    resource:
      - {ssm_log_stream_arn}
"""
    iam_ctl = self.paco_ctx.get_controller('IAM')
    iam_ctl.add_managed_policy(
        role=self.resource.instance_iam_role,
        resource=self.resource,
        policy_name='policy',
        policy_config_yaml=policy_config_yaml,
        extra_ref_names=['ec2lm', 'ssmagent'],
    )
    # TODO: Make this work with Linux too
    self.stack.hooks.add(
        name='UpdateSSMAgent.' + self.resource.name,
        stack_action=['create', 'update'],
        stack_timing='post',
        hook_method=self.asg_hook_update_ssm_agent,
        cache_method=None,
        hook_arg=self.resource
    )
def __init__(
    self,
    stack,
    paco_ctx,
):
    """Build the CloudFormation template for a Lambda Function resource.

    Creates CFN Parameters for the function settings, the Function
    resource itself (code from S3, inline ZipFile or a built artifact),
    an optional SimpleDB cache domain, invoke Permissions for connected
    SNS Topics, S3 Bucket notifications, EventsRules and IoT Analytics
    pipelines, LogGroups plus an IAM ManagedPolicy for log access, and
    Outputs for the function name and ARN.
    """
    super().__init__(
        stack,
        paco_ctx,
        iam_capabilities=["CAPABILITY_NAMED_IAM"],
    )
    account_ctx = stack.account_ctx
    aws_region = stack.aws_region
    self.set_aws_name('Lambda', self.resource_group_name, self.resource_name)
    awslambda = self.awslambda = self.stack.resource
    self.init_template('Lambda Function')

    # if not enabled finish with only empty placeholder
    if not awslambda.is_enabled():
        return

    # Parameters
    sdb_cache_param = self.create_cfn_parameter(
        name='EnableSDBCache',
        param_type='String',
        description='Boolean indicating whether an SDB Domain will be created to be used as a cache.',
        value=awslambda.sdb_cache
    )
    function_description_param = self.create_cfn_parameter(
        name='FunctionDescription',
        param_type='String',
        description='A description of the Lamdba Function.',
        value=awslambda.description
    )
    handler_param = self.create_cfn_parameter(
        name='Handler',
        param_type='String',
        description='The name of the function to call upon execution.',
        value=awslambda.handler
    )
    runtime_param = self.create_cfn_parameter(
        name='Runtime',
        param_type='String',
        description='The name of the runtime language.',
        value=awslambda.runtime
    )
    role_arn_param = self.create_cfn_parameter(
        name='RoleArn',
        param_type='String',
        description='The execution role for the Lambda Function.',
        value=awslambda.iam_role.get_arn()
    )
    role_name_param = self.create_cfn_parameter(
        name='RoleName',
        param_type='String',
        description='The execution role name for the Lambda Function.',
        value=awslambda.iam_role.resolve_ref_obj.role_name
    )
    memory_size_param = self.create_cfn_parameter(
        name='MemorySize',
        param_type='Number',
        description="The amount of memory that your function has access to. Increasing the function's" + \
            " memory also increases its CPU allocation. The default value is 128 MB. The value must be a multiple of 64 MB.",
        value=awslambda.memory_size
    )
    reserved_conc_exec_param = self.create_cfn_parameter(
        name='ReservedConcurrentExecutions',
        param_type='Number',
        description='The number of simultaneous executions to reserve for the function.',
        value=awslambda.reserved_concurrent_executions
    )
    timeout_param = self.create_cfn_parameter(
        name='Timeout',
        param_type='Number',
        description='The amount of time that Lambda allows a function to run before stopping it. ',
        value=awslambda.timeout
    )
    layers_param = self.create_cfn_parameter(
        name='Layers',
        param_type='CommaDelimitedList',
        description='List of up to 5 Lambda Layer ARNs.',
        value=','.join(awslambda.layers)
    )

    # create the Lambda resource
    cfn_export_dict = {
        'Description': troposphere.Ref(function_description_param),
        'Handler': troposphere.Ref(handler_param),
        'MemorySize': troposphere.Ref(memory_size_param),
        'Runtime': troposphere.Ref(runtime_param),
        'Role': troposphere.Ref(role_arn_param),
        'Timeout': troposphere.Ref(timeout_param),
    }
    if awslambda.reserved_concurrent_executions:
        # BUG FIX: a trailing comma previously made this value a 1-tuple
        # wrapping the Ref, producing invalid CloudFormation output.
        cfn_export_dict['ReservedConcurrentExecutions'] = troposphere.Ref(reserved_conc_exec_param)
    if len(awslambda.layers) > 0:
        # BUG FIX: same trailing-comma tuple defect as above.
        cfn_export_dict['Layers'] = troposphere.Ref(layers_param)

    # Lambda VPC
    if awslambda.vpc_config != None:
        vpc_security_group = self.create_cfn_ref_list_param(
            name='VpcSecurityGroupIdList',
            param_type='List<AWS::EC2::SecurityGroup::Id>',
            description='VPC Security Group Id List',
            value=awslambda.vpc_config.security_groups,
            ref_attribute='id',
        )
        # Segment SubnetList is a Segment stack Output based on availability zones
        segment_ref = awslambda.vpc_config.segments[0] + '.subnet_id_list'
        subnet_list_param = self.create_cfn_parameter(
            name='VpcSubnetIdList',
            param_type='List<AWS::EC2::Subnet::Id>',
            description='VPC Subnet Id List',
            value=segment_ref
        )
        cfn_export_dict['VpcConfig'] = {
            'SecurityGroupIds': troposphere.Ref(vpc_security_group),
            'SubnetIds': troposphere.Ref(subnet_list_param),
        }

    # Code object: S3 Bucket, inline ZipFile or deploy artifact?
    if awslambda.code.s3_bucket:
        if awslambda.code.s3_bucket.startswith('paco.ref '):
            value = awslambda.code.s3_bucket + ".name"
        else:
            value = awslambda.code.s3_bucket
        s3bucket_param = self.create_cfn_parameter(
            name='CodeS3Bucket',
            description="An Amazon S3 bucket in the same AWS Region as your function. The bucket can be in a different AWS account.",
            param_type='String',
            value=value
        )
        s3key_param = self.create_cfn_parameter(
            name='CodeS3Key',
            description="The Amazon S3 key of the deployment package.",
            param_type='String',
            value=awslambda.code.s3_key
        )
        cfn_export_dict['Code'] = {
            'S3Bucket': troposphere.Ref(s3bucket_param),
            'S3Key': troposphere.Ref(s3key_param),
        }
    else:
        zip_path = Path(awslambda.code.zipfile)
        if zip_path.is_file():
            # small inline deployment: embed the file contents directly
            cfn_export_dict['Code'] = {
                'ZipFile': zip_path.read_text()
            }
        elif zip_path.is_dir():
            # get S3Bucket/S3Key or if it does not exist, it will create the bucket and artifact
            # and then upload the artifact
            bucket_name, artifact_name = init_lambda_code(
                self.paco_ctx.paco_buckets,
                self.stack.resource,
                awslambda.code.zipfile,
                self.stack.account_ctx,
                self.stack.aws_region,
            )
            s3bucket_param = self.create_cfn_parameter(
                name='CodeS3Bucket',
                description="The Paco S3 Bucket for configuration",
                param_type='String',
                value=bucket_name
            )
            s3key_param = self.create_cfn_parameter(
                name='CodeS3Key',
                description="The Lambda code artifact S3 Key.",
                param_type='String',
                value=artifact_name
            )
            cfn_export_dict['Code'] = {
                'S3Bucket': troposphere.Ref(s3bucket_param),
                'S3Key': troposphere.Ref(s3key_param),
            }

    # Environment variables
    var_export = {}
    if awslambda.environment != None and awslambda.environment.variables != None:
        for var in awslambda.environment.variables:
            name = var.key.replace('_', '')
            env_param = self.create_cfn_parameter(
                name='EnvVar{}'.format(name),
                param_type='String',
                description='Env var for {}'.format(name),
                value=var.value,
            )
            var_export[var.key] = troposphere.Ref(env_param)
    if awslambda.sdb_cache == True:
        var_export['SDB_CACHE_DOMAIN'] = troposphere.Ref('LambdaSDBCacheDomain')
    if len(awslambda.log_group_names) > 0:
        # Add PACO_LOG_GROUPS Environment Variable
        paco_log_groups = [
            prefixed_name(awslambda, loggroup_name, self.paco_ctx.legacy_flag)
            for loggroup_name in awslambda.log_group_names
        ]
        paco_log_groups_param = self.create_cfn_parameter(
            name='EnvVariablePacoLogGroups',
            param_type='String',
            description='Env var for Paco Log Groups',
            value=','.join(paco_log_groups),
        )
        var_export['PACO_LOG_GROUPS'] = troposphere.Ref(paco_log_groups_param)
    cfn_export_dict['Environment'] = {'Variables': var_export}

    # Lambda resource
    self.awslambda_resource = troposphere.awslambda.Function.from_dict(
        'Function',
        cfn_export_dict
    )
    self.template.add_resource(self.awslambda_resource)

    # SDB Cache with SDB Domain and SDB Domain Policy resources
    if awslambda.sdb_cache == True:
        sdb_domain_resource = troposphere.sdb.Domain(
            title='LambdaSDBCacheDomain',
            template=self.template,
            Description="Lambda Function Domain"
        )
        sdb_policy = troposphere.iam.Policy(
            title='LambdaSDBCacheDomainPolicy',
            template=self.template,
            PolicyName='SDBDomain',
            PolicyDocument=Policy(
                Version='2012-10-17',
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[Action("sdb", "*")],
                        Resource=[
                            troposphere.Sub(
                                'arn:aws:sdb:${AWS::Region}:${AWS::AccountId}:domain/${DomainName}',
                                DomainName=troposphere.Ref('LambdaSDBCacheDomain')
                            )
                        ],
                    )
                ],
            ),
            # NOTE(review): Roles is given a single Ref to the role ARN
            # Parameter; troposphere's iam.Policy Roles normally takes a
            # list of role *names* - confirm this renders as intended.
            Roles=troposphere.Ref(role_arn_param)
        )
        sdb_policy.DependsOn = sdb_domain_resource
        self.awslambda_resource.DependsOn = sdb_domain_resource

    # Permissions
    # SNS Topic Lambda permissions and subscription
    idx = 1
    for sns_topic_ref in awslambda.sns_topics:
        # SNS Topic Arn parameters
        param_name = 'SNSTopicArn%d' % idx
        self.create_cfn_parameter(
            name=param_name,
            param_type='String',
            description='An SNS Topic ARN to grant permission to.',
            value=sns_topic_ref + '.arn'
        )
        # Lambda permission
        troposphere.awslambda.Permission(
            title=param_name + 'Permission',
            template=self.template,
            Action="lambda:InvokeFunction",
            FunctionName=troposphere.GetAtt(self.awslambda_resource, 'Arn'),
            Principal='sns.amazonaws.com',
            SourceArn=troposphere.Ref(param_name),
        )
        # SNS Topic subscription
        sns_topic = get_model_obj_from_ref(sns_topic_ref, self.paco_ctx.project)
        troposphere.sns.SubscriptionResource(
            title=param_name + 'Subscription',
            template=self.template,
            Endpoint=troposphere.GetAtt(self.awslambda_resource, 'Arn'),
            Protocol='lambda',
            TopicArn=troposphere.Ref(param_name),
            Region=sns_topic.region_name
        )
        idx += 1

    # Lambda permissions for connected Paco resources
    app = get_parent_by_interface(awslambda, schemas.IApplication)
    for obj in get_all_nodes(app):
        # S3 Bucket notification permission(s)
        if schemas.IS3Bucket.providedBy(obj):
            seen = {}
            if hasattr(obj, 'notifications'):
                if hasattr(obj.notifications, 'lambdas'):
                    for lambda_notif in obj.notifications.lambdas:
                        if lambda_notif.function == awslambda.paco_ref:
                            # yes, this Lambda gets notification from this S3Bucket
                            group = get_parent_by_interface(obj, schemas.IResourceGroup)
                            s3_logical_name = self.gen_cf_logical_name(group.name + obj.name, '_')
                            if s3_logical_name not in seen:
                                troposphere.awslambda.Permission(
                                    title='S3Bucket' + s3_logical_name,
                                    template=self.template,
                                    Action="lambda:InvokeFunction",
                                    FunctionName=troposphere.GetAtt(self.awslambda_resource, 'Arn'),
                                    Principal='s3.amazonaws.com',
                                    SourceArn='arn:aws:s3:::' + obj.get_bucket_name(),
                                )
                                seen[s3_logical_name] = True

        # Events Rule permission(s)
        if schemas.IEventsRule.providedBy(obj):
            seen = {}
            for target in obj.targets:
                target_ref = Reference(target.target)
                target_ref.set_account_name(account_ctx.get_name())
                target_ref.set_region(aws_region)
                lambda_ref = Reference(awslambda.paco_ref)
                if target_ref.raw == lambda_ref.raw:
                    # yes, the Events Rule has a Target that is this Lambda
                    group = get_parent_by_interface(obj, schemas.IResourceGroup)
                    eventsrule_logical_name = self.gen_cf_logical_name(group.name + obj.name, '_')
                    if eventsrule_logical_name not in seen:
                        rule_name = create_event_rule_name(obj)
                        source_arn = 'arn:aws:events:{}:{}:rule/{}'.format(
                            aws_region,
                            account_ctx.id,
                            rule_name
                        )
                        troposphere.awslambda.Permission(
                            title='EventsRule' + eventsrule_logical_name,
                            template=self.template,
                            Action="lambda:InvokeFunction",
                            FunctionName=troposphere.GetAtt(self.awslambda_resource, 'Arn'),
                            Principal='events.amazonaws.com',
                            SourceArn=source_arn,
                        )
                        seen[eventsrule_logical_name] = True

        # IoT Analytics permission(s)
        if schemas.IIoTAnalyticsPipeline.providedBy(obj):
            seen = {}
            for activity in obj.pipeline_activities.values():
                if activity.activity_type == 'lambda':
                    target_ref = Reference(activity.function)
                    target_ref.set_account_name(account_ctx.get_name())
                    target_ref.set_region(aws_region)
                    lambda_ref = Reference(awslambda.paco_ref)
                    if target_ref.raw == lambda_ref.raw:
                        # yes, the IoT Analytics Lambda Activity has a ref to this Lambda
                        group = get_parent_by_interface(obj, schemas.IResourceGroup)
                        iotap_logical_name = self.gen_cf_logical_name(group.name + obj.name, '_')
                        if iotap_logical_name not in seen:
                            # NOTE(review): rule_name is unused here - no
                            # SourceArn is set for IoT Analytics permissions.
                            rule_name = create_event_rule_name(obj)
                            troposphere.awslambda.Permission(
                                title='IoTAnalyticsPipeline' + iotap_logical_name,
                                template=self.template,
                                Action="lambda:InvokeFunction",
                                FunctionName=troposphere.GetAtt(self.awslambda_resource, 'Arn'),
                                Principal='iotanalytics.amazonaws.com',
                            )
                            seen[iotap_logical_name] = True

    # Log group(s): derive the function's LogGroup name from its ARN
    loggroup_function_name = troposphere.Join(
        '', [
            '/aws/lambda/',
            troposphere.Select(
                6, troposphere.Split(':', troposphere.GetAtt(self.awslambda_resource, 'Arn'))
            )
        ]
    )
    loggroup_resources = []
    loggroup_resources.append(
        self.add_log_group(loggroup_function_name, 'lambda')
    )
    if len(awslambda.log_group_names) > 0:
        # Additional App-specific LogGroups
        for loggroup_name in awslambda.log_group_names:
            # Add LogGroup to the template
            prefixed_loggroup_name = prefixed_name(awslambda, loggroup_name, self.paco_ctx.legacy_flag)
            loggroup_resources.append(
                self.add_log_group(prefixed_loggroup_name)
            )

    # LogGroup permissions
    log_group_arns = [
        troposphere.Join(':', [
            f'arn:aws:logs:{self.aws_region}:{account_ctx.id}:log-group',
            loggroup_function_name,
            '*'
        ])
    ]
    log_stream_arns = [
        troposphere.Join(':', [
            f'arn:aws:logs:{self.aws_region}:{account_ctx.id}:log-group',
            loggroup_function_name,
            'log-stream',
            '*'
        ])
    ]
    for loggroup_name in awslambda.log_group_names:
        prefixed_loggroup_name = prefixed_name(awslambda, loggroup_name, self.paco_ctx.legacy_flag)
        log_group_arns.append(
            f'arn:aws:logs:{self.aws_region}:{account_ctx.id}:log-group:{prefixed_loggroup_name}:*'
        )
        log_stream_arns.append(
            f'arn:aws:logs:{self.aws_region}:{account_ctx.id}:log-group:{prefixed_loggroup_name}:log-stream:*'
        )
    loggroup_policy_resource = troposphere.iam.ManagedPolicy(
        title='LogGroupManagedPolicy',
        PolicyDocument=Policy(
            Version='2012-10-17',
            Statement=[
                Statement(
                    Sid='AllowLambdaModifyLogStreams',
                    Effect=Allow,
                    Action=[
                        Action("logs", "CreateLogStream"),
                        Action("logs", "DescribeLogStreams"),
                    ],
                    Resource=log_group_arns,
                ),
                Statement(
                    Sid='AllowLambdaPutLogEvents',
                    Effect=Allow,
                    Action=[
                        Action("logs", "PutLogEvents"),
                    ],
                    Resource=log_stream_arns,
                ),
            ],
        ),
        Roles=[troposphere.Ref(role_name_param)],
    )
    loggroup_policy_resource.DependsOn = loggroup_resources
    self.template.add_resource(loggroup_policy_resource)

    # Outputs
    self.create_output(
        title='FunctionName',
        value=troposphere.Ref(self.awslambda_resource),
        ref=awslambda.paco_ref_parts + '.name',
    )
    self.create_output(
        title='FunctionArn',
        value=troposphere.GetAtt(self.awslambda_resource, 'Arn'),
        ref=awslambda.paco_ref_parts + '.arn',
    )
def add_alarms(
    self,
    template,
    alarms,
    resource,
    project,
    alarm_id,
    alarm_set_id,
):
    """Add CloudWatch Alarm resources for one alarm set to the template.

    Creates the dimension Parameters needed for the resource type
    (including the multi-dimension IoTAnalyticsPipeline case), then one
    Alarm resource plus an Output per enabled alarm.

    Returns True if at least one alarm in the set is enabled.
    """
    alarms_are_enabled = False

    # Dimension Parameters
    # First calculate multiple parameters for complex resources with
    # multiple sub-resources like IoTAnalyticsPipeline
    if schemas.IIoTAnalyticsPipeline.providedBy(resource):
        params_needed = {}
        dataset_params = {}
        for alarm in alarms:
            if alarm.metric_name not in params_needed:
                params_needed[alarm.metric_name] = [alarm]
            else:
                params_needed[alarm.metric_name].append(alarm)
        # BUG FIX: the loop variable was named `alarms`, clobbering the
        # `alarms` parameter so the main loop below only processed the
        # last metric's alarm list.
        for metric_name, metric_alarms in params_needed.items():
            if metric_name == 'IncomingMessages':
                value = resource.paco_ref + '.channel.name'
                # BUG FIX: was assigned to pipeline_name_param, but the
                # Dimensions code below reads channel_name_param, which
                # raised NameError for IncomingMessages alarms.
                channel_name_param = self.create_cfn_parameter(
                    name='ChannelName',
                    param_type='String',
                    description='The ChannelName for the dimension.',
                    value=value
                )
            elif metric_name == 'ActivityExecutionError':
                value = resource.paco_ref + '.pipeline.name'
                pipeline_name_param = self.create_cfn_parameter(
                    name='PipelineName',
                    param_type='String',
                    description='The PipelineName for the dimension.',
                    value=value
                )
            elif metric_name == 'ActionExecution':
                dataset_names = {}
                for alarm in metric_alarms:
                    for dimension in alarm.dimensions:
                        if dimension.name.lower() == 'datasetname':
                            dataset_names[dimension.value] = None
                for dataset_name in dataset_names:
                    value = f'{resource.paco_ref}.dataset.{dataset_name}.name'
                    dataset_params[dataset_name] = self.create_cfn_parameter(
                        name=f'{dataset_name}DatasetName',
                        param_type='String',
                        description=f'The DatasetName for {dataset_name}.',
                        value=value
                    )
                # stash the dataset param on the alarm so it can be used to create the Dimension
                # BUG FIX: previously set only on the stale last `alarm`
                # of the loop above, not on each matching alarm.
                for alarm in metric_alarms:
                    for dimension in alarm.dimensions:
                        if dimension.name.lower() == 'datasetname':
                            alarm._dataset_param = dataset_params[dimension.value]

    # simple Resources with a single name and dimension
    elif schemas.IResource.providedBy(resource):
        value = resource.paco_ref + '.name'
        if schemas.IElastiCacheRedis.providedBy(resource):
            # Primary node uses the aws name with '-001' appended to it
            # ToDo: how to have Alarms for the read replica nodes?
            value = resource.get_aws_name() + '-001'
        dimension_param = self.create_cfn_parameter(
            name='DimensionResource',
            param_type='String',
            description='The resource id or name for the metric dimension.',
            value=value
        )
    # DBCluster with DBInstances
    elif schemas.IRDSClusterInstance.providedBy(resource):
        value = f"{resource.dbcluster.paco_ref}.db_instances.{resource.name}.name"
        dimension_param = self.create_cfn_parameter(
            name='DimensionResource',
            param_type='String',
            description='The resource id or name for the metric dimension.',
            value=value
        )

    for alarm in alarms:
        if alarm.enabled == True:
            alarms_are_enabled = True
        else:
            continue
        if len(alarm.dimensions) > 0:
            for dimension in alarm.dimensions:
                dimension.parameter = self.create_cfn_parameter(
                    name='DimensionResource{}{}'.format(alarm.cfn_resource_name, dimension.name),
                    param_type='String',
                    description='The resource id or name for the metric dimension.',
                    value=dimension.value,
                )

        # compute dynamic attributes for cfn_export_dict
        alarm_export_dict = alarm.cfn_export_dict
        self.set_alarm_actions_to_cfn_export(alarm, alarm_export_dict)

        # AlarmDescription
        notification_cfn_refs = self.create_notification_params(alarm)
        alarm_export_dict['AlarmDescription'] = alarm.get_alarm_description(notification_cfn_refs)

        # Namespace
        if not alarm.namespace:
            if schemas.ICloudWatchLogAlarm.providedBy(alarm):
                # Namespace look-up for LogAlarms
                obj = get_parent_by_interface(alarm, schemas.IMonitorConfig)
                try:
                    log_group = obj.log_sets[alarm.log_set_name].log_groups[alarm.log_group_name]
                except KeyError:
                    raise InvalidLogSetConfiguration("""
Invalid Log Set configuration:

Log Set: {}
Log Group: {}
Resource: {} (type: {})
Resource paco.ref: {}

HINT: Ensure that the monitoring.log_sets for the resource is enabled
and that the Log Set and Log Group names match.
""".format(alarm.log_set_name, alarm.log_group_name, resource.name, resource.type, resource.paco_ref)
                    )
                alarm_export_dict['Namespace'] = "Paco/" + prefixed_name(
                    resource, log_group.get_full_log_group_name(), self.paco_ctx.legacy_flag
                )
            else:
                # if not supplied default to the Namespace for the Resource type
                alarm_export_dict['Namespace'] = vocabulary.cloudwatch[resource.type]['namespace']
        else:
            # Use the Namespace as directly supplied
            alarm_export_dict['Namespace'] = alarm.namespace

        # Dimensions
        # if there are no dimensions, then fallback to the default of
        # a primary dimension and the resource's resource_name
        # This only happens for Resource-level Alarms
        # MetricFilter LogGroup Alarms must have no dimensions
        dimensions = []
        if not schemas.ICloudWatchLogAlarm.providedBy(alarm):
            # simple metric Resources with a single Dimension based on the resource type
            if schemas.IResource.providedBy(resource) and len(alarm.dimensions) < 1:
                dimensions.append(
                    {'Name': vocabulary.cloudwatch[resource.type]['dimension'],
                     'Value': troposphere.Ref(dimension_param)}
                )
            elif schemas.IRDSClusterInstance.providedBy(resource) and len(alarm.dimensions) < 1:
                dimensions.append(
                    {'Name': vocabulary.cloudwatch[resource.type]['dimension'],
                     'Value': troposphere.Ref(dimension_param)}
                )
            # complex metric Resources that have more than one Dimension to select
            elif schemas.IASG.providedBy(resource) or schemas.IIoTTopicRule.providedBy(resource):
                dimensions.append(
                    {'Name': vocabulary.cloudwatch[resource.type]['dimension'],
                     'Value': troposphere.Ref(dimension_param)}
                )
            elif schemas.IIoTAnalyticsPipeline.providedBy(resource):
                # IoTAnalyticsPipeline can alarm on Channel, Pipeline, Datastore and Dataset dimensions
                if alarm.metric_name == 'ActivityExecutionError':
                    dimensions.append(
                        {'Name': 'PipelineName', 'Value': troposphere.Ref(pipeline_name_param)}
                    )
                elif alarm.metric_name == 'IncomingMessages':
                    dimensions.append(
                        {'Name': 'ChannelName', 'Value': troposphere.Ref(channel_name_param)}
                    )
                elif alarm.metric_name == 'ActionExecution':
                    dimensions.append(
                        {'Name': 'DatasetName', 'Value': troposphere.Ref(alarm._dataset_param)}
                    )
                else:
                    raise InvalidAlarmConfiguration(f"Unsuported metric_name '{alarm.metric_name}' specified for IoTAnalyticsPipeline alarm:\n{alarm.paco_ref_parts}")

            # Add ClientId (account id) dimension for ElasticsearchDomain
            if schemas.IElasticsearchDomain.providedBy(resource):
                dimensions.append(
                    {'Name': 'ClientId', 'Value': self.stack.account_ctx.id }
                )
            for dimension in alarm.dimensions:
                if schemas.IIoTAnalyticsPipeline.providedBy(resource) and dimension.name == 'DatasetName':
                    continue
                dimensions.append(
                    {'Name': dimension.name, 'Value': troposphere.Ref(dimension.parameter)}
                )
        alarm_export_dict['Dimensions'] = dimensions

        # Add Alarm resource
        alarm_resource = troposphere.cloudwatch.Alarm.from_dict(
            alarm.cfn_resource_name,
            alarm_export_dict
        )
        template.add_resource(alarm_resource)

        # Alarm Output
        output_ref = '.'.join([resource.paco_ref_parts, 'monitoring', 'alarm_sets', alarm_set_id, alarm_id])
        self.create_output(
            title=alarm.cfn_resource_name,
            value=troposphere.Ref(alarm_resource),
            ref=output_ref,
        )
    return alarms_are_enabled
def add_alarms(
    self,
    template,
    alarms,
    resource,
    res_config_ref,
    project,
    alarm_id,
    alarm_set_id,
):
    """Add CloudWatch Alarm resources for one alarm set to the template.

    Creates a dimension Parameter for simple Resources, then one Alarm
    resource plus an Output per enabled alarm. Returns True if at least
    one alarm in the set is enabled.
    """
    # Add Parameters
    if schemas.IResource.providedBy(resource):
        value = resource.paco_ref + '.name'
        if schemas.IElastiCacheRedis.providedBy(resource):
            # Primary node uses the aws name with '-001' appended to it
            # ToDo: how to have Alarms for the read replica nodes?
            value = resource.get_aws_name() + '-001'
        dimension_param = self.create_cfn_parameter(
            name='DimensionResource',
            param_type='String',
            description='The resource id or name for the metric dimension.',
            value=value)
    alarms_are_enabled = False
    for alarm in alarms:
        # disabled alarms are skipped entirely
        if alarm.enabled == True:
            alarms_are_enabled = True
        else:
            continue
        # one Parameter per explicitly configured dimension
        if len(alarm.dimensions) > 0:
            for dimension in alarm.dimensions:
                dimension.parameter = self.create_cfn_parameter(
                    name='DimensionResource{}{}'.format(
                        alarm.cfn_resource_name, dimension.name),
                    param_type='String',
                    description=
                    'The resource id or name for the metric dimension.',
                    value=dimension.value,
                )
        # compute dynamic attributes for cfn_export_dict
        alarm_export_dict = alarm.cfn_export_dict
        self.set_alarm_actions_to_cfn_export(alarm, alarm_export_dict)
        # AlarmDescription
        notification_cfn_refs = self.create_notification_params(alarm)
        alarm_export_dict[
            'AlarmDescription'] = alarm.get_alarm_description(
                notification_cfn_refs)
        # Namespace
        if not alarm.namespace:
            if schemas.ICloudWatchLogAlarm.providedBy(alarm):
                # Namespace look-up for LogAlarms
                obj = get_parent_by_interface(alarm, schemas.IMonitorConfig)
                try:
                    log_group = obj.log_sets[
                        alarm.log_set_name].log_groups[
                            alarm.log_group_name]
                except KeyError:
                    raise InvalidLogSetConfiguration("""
Invalid Log Set configuration:

Log Set: {}
Log Group: {}
Resource: {} (type: {})
Resource paco.ref: {}

HINT: Ensure that the monitoring.log_sets for the resource is enabled
and that the Log Set and Log Group names match.
""".format(alarm.log_set_name, alarm.log_group_name, resource.name, resource.type, resource.paco_ref))
                alarm_export_dict['Namespace'] = "Paco/" + prefixed_name(
                    resource, log_group.get_full_log_group_name(),
                    self.paco_ctx.legacy_flag)
            else:
                # if not supplied default to the Namespace for the Resource type
                alarm_export_dict['Namespace'] = vocabulary.cloudwatch[
                    resource.type]['namespace']
        else:
            # Use the Namespace as directly supplied
            alarm_export_dict['Namespace'] = alarm.namespace
        # Dimensions
        # if there are no dimensions, then fallback to the default of
        # a primary dimension and the resource's resource_name
        # This only happens for Resource-level Alarms
        # MetricFilter LogGroup Alarms must have no dimensions
        dimensions = []
        if not schemas.ICloudWatchLogAlarm.providedBy(alarm):
            if schemas.IResource.providedBy(resource) and len(
                    alarm.dimensions) < 1:
                dimensions.append({
                    'Name':
                    vocabulary.cloudwatch[resource.type]['dimension'],
                    'Value':
                    troposphere.Ref(dimension_param)
                })
            elif schemas.IASG.providedBy(resource):
                # NOTE(review): unlike the IResource branch, ASG adds the
                # default dimension even when alarm.dimensions is non-empty
                # - confirm this is intended.
                dimensions.append({
                    'Name':
                    vocabulary.cloudwatch[resource.type]['dimension'],
                    'Value':
                    troposphere.Ref(dimension_param)
                })
            for dimension in alarm.dimensions:
                dimensions.append({
                    'Name': dimension.name,
                    'Value': troposphere.Ref(dimension.parameter)
                })
        alarm_export_dict['Dimensions'] = dimensions
        # Add Alarm resource
        alarm_resource = troposphere.cloudwatch.Alarm.from_dict(
            alarm.cfn_resource_name, alarm_export_dict)
        template.add_resource(alarm_resource)
        # Alarm Output
        output_ref = '.'.join([
            res_config_ref, 'monitoring', 'alarm_sets', alarm_set_id,
            alarm_id
        ])
        self.create_output(
            title=alarm.cfn_resource_name,
            value=troposphere.Ref(alarm_resource),
            ref=output_ref,
        )
    return alarms_are_enabled
def __init__(self, paco_ctx, account_ctx, aws_region, stack_group,
             stack_tags, group_name, resource, res_config_ref):
    """Build a CloudFormation template of LogGroups (and their
    MetricFilters) for a resource's monitoring log sets.

    Each LogGroup name is exposed as a CFN Parameter holding the
    Paco-prefixed name. Retention precedence: log_group setting, then
    log_set setting, then the project-wide cw_logging default.
    """
    super().__init__(paco_ctx, account_ctx, aws_region,
                     config_ref=res_config_ref, stack_group=stack_group,
                     stack_tags=stack_tags)
    self.set_aws_name('LogGroups', group_name, resource.name)
    self.resource = resource

    # Troposphere Template Initialization
    self.init_template('LogGroups')
    template = self.template
    cw_logging = get_parent_by_interface(resource, schemas.IProject)['cw_logging']
    default_retention = cw_logging.expire_events_after_days

    for log_group in self.resource.monitoring.log_sets.get_all_log_groups():
        cfn_export_dict = {}
        log_group_name = log_group.get_full_log_group_name()
        prefixed_log_group_name = prefixed_name(resource, log_group_name, self.paco_ctx.legacy_flag)
        loggroup_logical_id = self.create_cfn_logical_id('LogGroup' + log_group_name)

        # provide prefixed LogGroup name as a CFN Parameter
        param_name = 'Name' + loggroup_logical_id
        log_group_name_parameter = self.create_cfn_parameter(
            param_type='String',
            name=param_name,
            description='LogGroup name',
            value=prefixed_log_group_name,
        )
        cfn_export_dict['LogGroupName'] = troposphere.Ref(log_group_name_parameter)

        # override default retention?
        # 1. log_group.expire_events_after_days <- specific to single log group
        # 2. log_set.expire_events_after_days <- applies to an entire log set
        # 3. cw_logging.expire_events_after_days <- global default
        log_set = get_parent_by_interface(log_group, schemas.ICloudWatchLogSet)
        # NOTE(review): both guards test hasattr on log_set even though the
        # first branch reads log_group.expire_events_after_days - confirm.
        if hasattr(log_set, 'expire_events_after_days') and log_group.expire_events_after_days:
            retention = log_group.expire_events_after_days
        elif hasattr(log_set, 'expire_events_after_days') and log_set.expire_events_after_days:
            retention = log_set.expire_events_after_days
        else:
            retention = default_retention
        if retention != 'Never':
            cfn_export_dict["RetentionInDays"] = retention

        log_group_resource = troposphere.logs.LogGroup.from_dict(
            loggroup_logical_id,
            cfn_export_dict
        )
        template.add_resource(log_group_resource)

        # Metric Filters
        for metric_filter in log_group.metric_filters.values():
            mf_dict = {
                'LogGroupName': troposphere.Ref(log_group_name_parameter),
                'FilterPattern': metric_filter.filter_pattern,
            }
            mt_list = []
            for transf in metric_filter.metric_transformations:
                # If MetricNamespace is not set, use a dynamic 'Paco/{log-group-name}' namespace
                if transf.metric_namespace:
                    namespace = transf.metric_namespace
                else:
                    namespace = 'Paco/' + prefixed_log_group_name
                mts_dict = {
                    'MetricName': transf.metric_name,
                    'MetricNamespace': namespace,
                    'MetricValue': transf.metric_value
                }
                # BUG FIX: was `type(transf.default_value) == type(float)`,
                # which compares against `type` itself and never matches a
                # float; the sibling LogGroups template uses isinstance.
                if isinstance(transf.default_value, float):
                    mts_dict['DefaultValue'] = transf.default_value
                mt_list.append(mts_dict)
            mf_dict['MetricTransformations'] = mt_list
            metric_filter_resource = troposphere.logs.MetricFilter.from_dict(
                self.create_cfn_logical_id('MetricFilter' + metric_filter.name),
                mf_dict,
            )
            metric_filter_resource.DependsOn = log_group_resource
            template.add_resource(metric_filter_resource)

    # Generate the Template
    self.set_template()
def __init__(self, stack, paco_ctx):
    """
    Build a CloudFormation template of LogGroups (and their MetricFilters)
    for a resource's monitoring.log_sets.

    Differences from the older LogGroups template in this file:
    - LogGroups marked external_resource are not created (and their name is
      not prefixed); MetricFilters may still attach to them.
    - For RDSAurora resources the monitoring config comes from
      default_instance.monitoring.
    - For ASG resources with the SSM agent enabled, an extra SSM LogGroup
      is created.
    - Registers a post-delete stack hook that deletes the LogGroups.
    """
    super().__init__(stack, paco_ctx)
    self.set_aws_name('LogGroups', self.resource_group_name, self.resource_name)

    # Troposphere Template Initialization
    self.init_template('LogGroups')
    template = self.template

    # CloudWatch Agent logging
    # project-wide CloudWatch logging config supplies the default retention
    cw_logging = get_parent_by_interface(stack.resource, schemas.IProject)['cw_logging']
    default_retention = cw_logging.expire_events_after_days
    # RDSAurora keeps its monitoring config on the default DB instance
    if schemas.IRDSAurora.providedBy(stack.resource):
        monitoring = stack.resource.default_instance.monitoring
    else:
        monitoring = stack.resource.monitoring
    for log_group in monitoring.log_sets.get_all_log_groups():
        cfn_export_dict = {}
        log_group_name = log_group.get_full_log_group_name()
        # external LogGroups keep their raw name; managed ones get the paco prefix
        if log_group.external_resource == False:
            prefixed_log_group_name = prefixed_name(stack.resource, log_group_name, self.paco_ctx.legacy_flag)
        else:
            prefixed_log_group_name = log_group_name
        loggroup_logical_id = self.create_cfn_logical_id('LogGroup' + log_group_name)

        # provide prefixed LogGroup name as a CFN Parameter
        param_name = 'Name' + loggroup_logical_id
        log_group_name_parameter = self.create_cfn_parameter(
            param_type='String',
            name=param_name,
            description='LogGroup name',
            value=prefixed_log_group_name,
        )
        cfn_export_dict['LogGroupName'] = troposphere.Ref(log_group_name_parameter)

        # override default retention?
        # 1. log_group.expire_events_after_days <- specific to single log group
        # 2. log_set.expire_events_after_days <- applies to an entire log set
        # 3. cw_logging.expire_events_after_days <- global default
        log_set = get_parent_by_interface(log_group, schemas.ICloudWatchLogSet)
        if hasattr(log_set, 'expire_events_after_days') and log_group.expire_events_after_days:
            retention = log_group.expire_events_after_days
        elif hasattr(log_set, 'expire_events_after_days') and log_set.expire_events_after_days:
            retention = log_set.expire_events_after_days
        else:
            retention = default_retention
        # retention of 'Never' means omit RetentionInDays so events never expire
        if retention != 'Never':
            cfn_export_dict["RetentionInDays"] = retention
        # Avoid creating loggroup if it already exists as an external resource
        if log_group.external_resource == False:
            log_group_resource = troposphere.logs.LogGroup.from_dict(
                loggroup_logical_id,
                cfn_export_dict
            )
            template.add_resource(log_group_resource)

        # Metric Filters
        for metric_filter in log_group.metric_filters.values():
            mf_dict = {
                'LogGroupName': troposphere.Ref(log_group_name_parameter),
                'FilterPattern': metric_filter.filter_pattern,
            }
            mt_list = []
            for transf in metric_filter.metric_transformations:
                # If MetricNamespace is not set, use a dynamic 'Paco/{log-group-name}' namespace
                if transf.metric_namespace:
                    namespace = transf.metric_namespace
                else:
                    namespace = 'Paco/' + prefixed_log_group_name
                mts_dict = {
                    'MetricName': transf.metric_name,
                    'MetricNamespace': namespace,
                    'MetricValue': transf.metric_value
                }
                if isinstance(transf.default_value, float):
                    mts_dict['DefaultValue'] = transf.default_value
                mt_list.append(mts_dict)
            mf_dict['MetricTransformations'] = mt_list
            metric_filter_resource = troposphere.logs.MetricFilter.from_dict(
                self.create_cfn_logical_id('MetricFilter' + metric_filter.name),
                mf_dict,
            )
            # only add DependsOn for LogGroups this template actually creates
            if log_group.external_resource == False:
                metric_filter_resource.DependsOn = log_group_resource
            template.add_resource(metric_filter_resource)

    # SSM Agent logging
    if schemas.IASG.providedBy(stack.resource):
        if stack.resource.launch_options.ssm_agent:
            loggroup_logical_id = 'SSMLogGroup'
            cfn_export_dict = {}
            # LogGroup name is prefixed as a CFN Parameter
            # ToDo: make paco_ssm a reserved word?
            prefixed_log_group_name = prefixed_name(stack.resource, 'paco_ssm', self.paco_ctx.legacy_flag)
            param_name = 'Name' + loggroup_logical_id
            log_group_name_parameter = self.create_cfn_parameter(
                param_type='String',
                name=param_name,
                description='SSM LogGroup name',
                value=prefixed_log_group_name,
            )
            cfn_export_dict['LogGroupName'] = troposphere.Ref(log_group_name_parameter)
            retention = stack.resource.launch_options.ssm_expire_events_after_days
            if retention != 'Never':
                cfn_export_dict["RetentionInDays"] = retention
            log_group_resource = troposphere.logs.LogGroup.from_dict(
                loggroup_logical_id,
                cfn_export_dict
            )
            template.add_resource(log_group_resource)

    # Sneak in setting the template so that we can add a stack hook
    # from a template
    stack.template = self
    stack.hooks.add(
        name='DeleteLogGroups.' + self.resource.name,
        stack_action=['delete'],
        stack_timing='post',
        hook_method=self.stack_hook_delete_log_groups,
        cache_method=None,
        hook_arg=monitoring
    )
def add_alarms(self, alarms):
    """
    Add a list of Alarms to the template.

    Creates the CloudFormation Parameters needed for metric Dimensions
    (shared across Alarms where possible), then adds one
    AWS::CloudWatch::Alarm resource plus a Stack Output for every enabled
    Alarm in the list. Disabled Alarms are skipped.

    Returns True if at least one Alarm in the list is enabled,
    otherwise False.

    Raises InvalidLogSetConfiguration when a LogAlarm references a
    log set/group that is not configured, and InvalidAlarmConfiguration
    for an unsupported IoTAnalyticsPipeline metric name.
    """
    alarms_are_enabled = False
    dimension_parameters = {}
    resource = self.resource
    # Dimension Parameters:
    # Each Dimension Parameter may be shared by multiple Alarms in one template
    #
    # Resources with a single Dimension look like:
    #
    # - Resource Type: ASG
    #   Dimensions: ['AutoScalingGroupName', 'my-asg-name']
    #
    # There will be a single CloudFormation Parameter named 'DimensionResource' for these single Dimension resources.
    # Other Resources may need multiple Dimensions or can have sub-resources each with a different Dimension.

    # IoTAnalyticsPipeline: Dimensions for Channel, Pipeline and DataSet
    if schemas.IIoTAnalyticsPipeline.providedBy(resource):
        # group Alarms by metric name so each Dimension Parameter is created once
        params_needed = {}
        for alarm in alarms:
            if alarm.metric_name not in params_needed:
                params_needed[alarm.metric_name] = [alarm]
            else:
                params_needed[alarm.metric_name].append(alarm)
        # BUG FIX: this loop variable used to be named `alarms`, clobbering
        # the method parameter. After this branch the main loop below then
        # iterated only the last metric group's alarms, silently dropping
        # the rest. Renamed to `metric_alarms`.
        for metric_name, metric_alarms in params_needed.items():
            if metric_name == 'IncomingMessages':
                value = resource.paco_ref + '.channel.name'
                dimension_parameters['ChannelName'] = self.create_cfn_parameter(
                    name='ChannelName',
                    param_type='String',
                    description='The ChannelName for the dimension.',
                    value=value)
            elif metric_name == 'ActivityExecutionError':
                value = resource.paco_ref + '.pipeline.name'
                dimension_parameters['PipelineName'] = self.create_cfn_parameter(
                    name='PipelineName',
                    param_type='String',
                    description='The PipelineName for the dimension.',
                    value=value)
            elif metric_name == 'ActionExecution':
                # one Parameter per distinct DatasetName dimension value;
                # each alarm remembers its Parameter via _param_name
                dataset_names = {}
                for alarm in metric_alarms:
                    for dimension in alarm.dimensions:
                        if dimension.name.lower() == 'datasetname':
                            dataset_name = dimension.value
                            cfn_name = self.create_cfn_logical_id(f'{dataset_name}DatasetName')
                            dataset_names[dataset_name] = cfn_name
                            alarm._param_name = cfn_name
                for dataset_name, cfn_name in dataset_names.items():
                    value = f'{resource.paco_ref}.dataset.{dataset_name}.name'
                    dimension_parameters[cfn_name] = self.create_cfn_parameter(
                        name=cfn_name,
                        param_type='String',
                        description=f'The DatasetName for {dataset_name}.',
                        value=value)
    # ECSServices need the ClusterName Dimension and the ServiceName for each Service
    elif schemas.IECSServices.providedBy(resource):
        dimension_parameters['ClusterName'] = self.create_cfn_parameter(
            name='ClusterName',
            param_type='String',
            description='The ClusterName dimension.',
            value=resource.cluster + '.name')
        for service in resource.services.values():
            name = self.create_cfn_logical_id(f'ServiceName{service.name}')
            service._dimension_name = name
            dimension_parameters[name] = self.create_cfn_parameter(
                name=name,
                param_type='String',
                description='ServiceName for the Dimension.',
                value=service.paco_ref + '.name')
    # DBCluster with DBInstances
    elif schemas.IRDSClusterInstance.providedBy(resource):
        value = f"{resource.dbcluster.paco_ref}.db_instances.{resource.name}.name"
        dimension_parameters['ResourceName'] = self.create_cfn_parameter(
            name='DimensionResource',
            param_type='String',
            description='The resource id or name for the metric dimension.',
            value=value)
    # simple Resources with a single name and dimension
    elif schemas.IResource.providedBy(resource):
        value = resource.paco_ref + '.name'
        if schemas.IElastiCacheRedis.providedBy(resource):
            # Primary node uses the aws name with '-001' appended to it
            # ToDo: how to have Alarms for the read replica nodes?
            value = resource.get_aws_name() + '-001'
        dimension_parameters['ResourceName'] = self.create_cfn_parameter(
            name='DimensionResource',
            param_type='String',
            description='The resource id or name for the metric dimension.',
            value=value)

    for alarm in alarms:
        # Track if any alarms are enabled
        if alarm.enabled == True:
            alarms_are_enabled = True
        else:
            continue

        # Alarm hook: registered by Services using paco.extend.add_cw_alarm_hook
        for hook in CW_ALARM_HOOKS:
            hook(alarm)

        # Dimension Parameters
        if len(alarm.dimensions) > 0:
            for dimension in alarm.dimensions:
                dimension.parameter = self.create_cfn_parameter(
                    name='DimensionResource{}{}'.format(alarm.cfn_resource_name, dimension.name),
                    param_type='String',
                    description='The resource id or name for the metric dimension.',
                    value=dimension.value,
                )

        # compute dynamic attributes for cfn_export_dict
        alarm_export_dict = alarm.cfn_export_dict
        self.set_alarm_actions_to_cfn_export(alarm, alarm_export_dict)

        # AlarmDescription
        notification_cfn_refs = self.create_notification_params(alarm)
        alarm_export_dict['AlarmDescription'] = alarm.get_alarm_description(notification_cfn_refs)

        # Namespace
        if not alarm.namespace:
            if schemas.ICloudWatchLogAlarm.providedBy(alarm):
                # Namespace look-up for LogAlarms
                obj = get_parent_by_interface(alarm, schemas.IMonitorConfig)
                try:
                    log_group = obj.log_sets[alarm.log_set_name].log_groups[alarm.log_group_name]
                except KeyError:
                    raise InvalidLogSetConfiguration(
                        """Invalid Log Set configuration:

Log Set: {}
Log Group: {}
Resource: {} (type: {})
Resource paco.ref: {}

HINT: Ensure that the monitoring.log_sets for the resource is enabled and
that the Log Set and Log Group names match.
""".format(alarm.log_set_name, alarm.log_group_name, resource.name, resource.type, resource.paco_ref)
                    )
                if log_group.external_resource == False:
                    prefixed_log_group_name = prefixed_name(resource, log_group.get_full_log_group_name(), self.paco_ctx.legacy_flag)
                else:
                    prefixed_log_group_name = log_group.get_full_log_group_name()
                alarm_export_dict['Namespace'] = "Paco/" + prefixed_log_group_name
            else:
                # if not supplied default to the Namespace for the Resource type
                if 'namespace_by_metric_name' in vocabulary.cloudwatch[resource.type].keys() and \
                        alarm.metric_name in vocabulary.cloudwatch[resource.type]['namespace_by_metric_name'].keys():
                    alarm_export_dict['Namespace'] = vocabulary.cloudwatch[resource.type]['namespace_by_metric_name'][alarm.metric_name]
                else:
                    alarm_export_dict['Namespace'] = vocabulary.cloudwatch[resource.type]['namespace']
        else:
            # Use the Namespace as directly supplied
            alarm_export_dict['Namespace'] = alarm.namespace

        # Dimensions
        # If there are no dimensions, then fallback to the default of a primary Dimension and the resource's resource_name
        # This only happens for Resource-level Alarms. MetricFilter LogGroup Alarms have no Dimensions
        dimensions = []
        if not schemas.ICloudWatchLogAlarm.providedBy(alarm):
            if schemas.IRDSClusterInstance.providedBy(resource) and len(alarm.dimensions) < 1:
                dimensions.append({
                    'Name': vocabulary.cloudwatch[resource.type]['dimension'],
                    'Value': troposphere.Ref(dimension_parameters['ResourceName'])
                })
            elif schemas.IECSServices.providedBy(resource):
                dimensions.append({
                    'Name': 'ClusterName',
                    'Value': troposphere.Ref(dimension_parameters['ClusterName'])
                })
                dimensions.append({
                    'Name': 'ServiceName',
                    'Value': troposphere.Ref(dimension_parameters[alarm._service._dimension_name])
                })
            # complex metric Resources that have more than one Dimension to select
            elif schemas.IASG.providedBy(resource) or schemas.IIoTTopicRule.providedBy(resource):
                dimensions.append({
                    'Name': vocabulary.cloudwatch[resource.type]['dimension'],
                    'Value': troposphere.Ref(dimension_parameters['ResourceName'])
                })
            elif schemas.IIoTAnalyticsPipeline.providedBy(resource):
                # IoTAnalyticsPipeline can alarm on Channel, Pipeline, Datastore and Dataset dimensions
                if alarm.metric_name == 'ActivityExecutionError':
                    dimensions.append({
                        'Name': 'PipelineName',
                        'Value': troposphere.Ref(dimension_parameters['PipelineName'])
                    })
                elif alarm.metric_name == 'IncomingMessages':
                    dimensions.append({
                        'Name': 'ChannelName',
                        'Value': troposphere.Ref(dimension_parameters['ChannelName'])
                    })
                elif alarm.metric_name == 'ActionExecution':
                    dimensions.append({
                        'Name': 'DatasetName',
                        'Value': troposphere.Ref(dimension_parameters[alarm._param_name])
                    })
                else:
                    raise InvalidAlarmConfiguration(
                        f"Unsuported metric_name '{alarm.metric_name}' specified for IoTAnalyticsPipeline alarm:\n{alarm.paco_ref_parts}"
                    )
            elif schemas.IResource.providedBy(resource) and len(alarm.dimensions) < 1:
                # Resources with a single Dimension based on the resource type
                dimensions.append({
                    'Name': vocabulary.cloudwatch[resource.type]['dimension'],
                    'Value': troposphere.Ref(dimension_parameters['ResourceName'])
                })
            # Add ClientId (account id) dimension for ElasticsearchDomain
            if schemas.IElasticsearchDomain.providedBy(resource):
                dimensions.append({
                    'Name': 'ClientId',
                    'Value': self.stack.account_ctx.id
                })
            for dimension in alarm.dimensions:
                # IoTAnalyticsPipeline DatasetName dimensions were already added above
                if schemas.IIoTAnalyticsPipeline.providedBy(resource) and dimension.name == 'DatasetName':
                    continue
                dimensions.append({
                    'Name': dimension.name,
                    'Value': troposphere.Ref(dimension.parameter)
                })
        alarm_export_dict['Dimensions'] = dimensions

        # Add Alarm resource
        alarm_resource = troposphere.cloudwatch.Alarm.from_dict(alarm.cfn_resource_name, alarm_export_dict)
        self.template.add_resource(alarm_resource)

        # Alarm Output
        output_ref = '.'.join([resource.paco_ref_parts, 'monitoring', 'alarm_sets', alarm.__parent__.name, alarm.name])
        self.create_output(
            title=alarm.cfn_resource_name,
            value=troposphere.Ref(alarm_resource),
            ref=output_ref,
        )

    return alarms_are_enabled
def __init__(self, stack, paco_ctx, task_execution_role):
    """
    Build a CloudFormation template of ECS TaskDefinitions and Services.

    For each TaskDefinition: wires in the Task Execution Role, converts
    paco.ref environment variables / images / secrets into CloudFormation
    Parameters, and configures awslogs LogGroups. For each Service: sets up
    optional Service Discovery, Target Group Parameters, and links the
    Service to its TaskDefinition and Cluster.
    """
    ecs_config = stack.resource
    super().__init__(stack, paco_ctx)
    self.set_aws_name('ECS Services', self.resource_group_name, self.resource.name)
    self.init_template('Elastic Container Service (ECS) Services and TaskDefinitions')
    if not ecs_config.is_enabled():
        return

    # Task Execution Role
    task_execution_role_param = self.create_cfn_parameter(
        name='TaskExecutionRole',
        param_type='String',
        description='Task Execution Role',
        value=task_execution_role.get_arn(),
    )

    # TaskDefinitions
    for task in ecs_config.task_definitions.values():
        task_dict = task.cfn_export_dict
        task_dict['ExecutionRoleArn'] = troposphere.Ref(task_execution_role_param)
        index = 0
        task._depends_on = []
        for container_definition in task.container_definitions.values():
            # ContainerDefinition Environment variables
            for env_pair in container_definition.environment:
                key = env_pair.name
                value = env_pair.value
                # only paco refs are passed as Parameters to avoid tripping the 60 Parameter CloudFormation limit
                if references.is_ref(value):
                    # idiom fix: isinstance instead of `type(value) == type(str())` comparisons
                    if isinstance(value, str):
                        param_type = 'String'
                    elif isinstance(value, (int, float)):
                        param_type = 'Number'
                    else:
                        raise UnsupportedCloudFormationParameterType(
                            "Can not cast {} of type {} to a CloudFormation Parameter type.".format(
                                value, type(value)
                            )
                        )
                    param_name = self.create_cfn_logical_id(f'{task.name}{container_definition.name}{key}')
                    environment_param = self.create_cfn_parameter(
                        param_type=param_type,
                        name=param_name,
                        description=f'Environment variable for container definition {container_definition.name} for task definition {task.name}',
                        value=value,
                    )
                    value = troposphere.Ref(environment_param)
                if 'Environment' not in task_dict['ContainerDefinitions'][index]:
                    task_dict['ContainerDefinitions'][index]['Environment'] = []
                task_dict['ContainerDefinitions'][index]['Environment'].append({'Name': key, 'Value': value})

            # Image can be a paco.ref to an ECR Repository
            if references.is_ref(container_definition.image):
                param_name = self.create_cfn_logical_id(f'{task.name}{container_definition.name}Image')
                image_arn_param = self.create_cfn_parameter(
                    param_type='String',
                    name=param_name,
                    description=f'Image used to start the container.',
                    value=container_definition.image + '.arn',
                )
                # The ECR URL needs to break apart the ARN and re-assemble it as the URL is not provided as a Stack Output :(
                task_dict['ContainerDefinitions'][index]['Image'] = troposphere.Join(
                    ':',
                    [
                        troposphere.Join(
                            '/',
                            [
                                # domain portion: aws_account_id.dkr.ecr.region.amazonaws.com
                                troposphere.Join(
                                    '.',
                                    [
                                        troposphere.Select(4, troposphere.Split(':', troposphere.Ref(image_arn_param))),  # account id
                                        'dkr',
                                        'ecr',
                                        troposphere.Select(3, troposphere.Split(':', troposphere.Ref(image_arn_param))),  # region
                                        'amazonaws',
                                        'com',
                                    ]
                                ),
                                troposphere.Select(1, troposphere.Split('/', troposphere.Ref(image_arn_param)))  # ecr-repo-name
                            ]
                        ),
                        container_definition.image_tag  # image tag
                    ]
                )
            else:
                task_dict['ContainerDefinitions'][index]['Image'] = container_definition.image

            # Logging configuration (idiom fix: `is not None` instead of `getattr(...) != None`)
            if container_definition.logging is not None:
                task_dict['ContainerDefinitions'][index]['LogConfiguration'] = {}
                log_dict = task_dict['ContainerDefinitions'][index]['LogConfiguration']
                log_dict['LogDriver'] = container_definition.logging.driver
                # Only awslogs supported for now
                if container_definition.logging.driver == 'awslogs':
                    log_dict['Options'] = {}
                    log_dict['Options']['awslogs-region'] = troposphere.Ref('AWS::Region')
                    prefixed_log_group_name = prefixed_name(container_definition, task.name)
                    log_group_resource = self.add_log_group(prefixed_log_group_name, container_definition.logging.expire_events_after_days)
                    log_dict['Options']['awslogs-group'] = troposphere.Ref(log_group_resource)
                    task._depends_on.append(log_group_resource)
                    log_dict['Options']['awslogs-stream-prefix'] = container_definition.name
            index += 1

        # Setup Secrets
        for task_dict_container_def in task_dict['ContainerDefinitions']:
            if 'Secrets' in task_dict_container_def:
                for secrets_pair in task_dict_container_def['Secrets']:
                    # Secrets Arn Parameters
                    name_hash = md5sum(str_data=secrets_pair['ValueFrom'])
                    secret_param_name = 'TaskDefinitionSecretArn' + name_hash
                    secret_param = self.create_cfn_parameter(
                        param_type='String',
                        name=secret_param_name,
                        description='The arn of the Secrets Manger Secret.',
                        value=secrets_pair['ValueFrom'] + '.arn'
                    )
                    # marker consumed later to substitute a troposphere Ref
                    secrets_pair['ValueFrom'] = '!ManualTroposphereRef ' + secret_param_name

        task_res = troposphere.ecs.TaskDefinition.from_dict(
            self.create_cfn_logical_id('TaskDefinition' + task.name),
            task_dict,
        )
        task_res.DependsOn = task._depends_on
        self.template.add_resource(task_res)
        # keep the troposphere resource so Services can Ref it below
        task._troposphere_res = task_res

    # Cluster Param
    cluster_param = self.create_cfn_parameter(
        name='Cluster',
        param_type='String',
        description='Cluster Name',
        value=ecs_config.cluster + '.name',
    )

    # Services
    # ToDo: allow multiple PrivateDnsNamespaces?
    # e.g. if multiple ECSServices want to participate in the same PrivateDnsNamespace?
    if ecs_config.service_discovery_namespace_name != '':
        private_dns_vpc_param = self.create_cfn_parameter(
            param_type='String',
            name='PrivateDnsNamespaceVpc',
            description='The Vpc for the Service Discovery Private DNS Namespace.',
            value='paco.ref ' + '.'.join(ecs_config.paco_ref_parts.split('.')[:4]) + '.network.vpc.id'
        )
        private_dns_namespace_res = troposphere.servicediscovery.PrivateDnsNamespace(
            title=self.create_cfn_logical_id(f'DiscoveryService{ecs_config.service_discovery_namespace_name}'),
            Name=ecs_config.service_discovery_namespace_name,
            Vpc=troposphere.Ref(private_dns_vpc_param),
        )
        self.template.add_resource(private_dns_namespace_res)
    for service in ecs_config.services.values():
        service_dict = service.cfn_export_dict

        # Service Discovery
        if service.hostname is not None:
            service_discovery_res = troposphere.servicediscovery.Service(
                title=self.create_cfn_logical_id(f'DiscoveryService{service.name}'),
                DnsConfig=troposphere.servicediscovery.DnsConfig(
                    DnsRecords=[
                        troposphere.servicediscovery.DnsRecord(
                            TTL='60',
                            Type='SRV'
                        )
                    ]
                ),
                HealthCheckCustomConfig=troposphere.servicediscovery.HealthCheckCustomConfig(FailureThreshold=1.0),
                NamespaceId=troposphere.Ref(private_dns_namespace_res),
                Name=service.name,
            )
            service_discovery_res.DependsOn = [private_dns_namespace_res]
            self.template.add_resource(service_discovery_res)
            service_dict['ServiceRegistries'] = []
            for load_balancer in service.load_balancers:
                service_registry_dict = {
                    'RegistryArn': troposphere.GetAtt(service_discovery_res, 'Arn'),
                    'ContainerName': load_balancer.container_name,
                    'ContainerPort': load_balancer.container_port,
                }
                # ToDo: add Port when needed ... 'Port': ?,
                service_dict['ServiceRegistries'].append(service_registry_dict)

        # convert TargetGroup ref to a Parameter
        lb_idx = 0
        if 'LoadBalancers' in service_dict:
            for lb in service_dict['LoadBalancers']:
                target_group_ref = lb['TargetGroupArn']
                tg_param = self.create_cfn_parameter(
                    name=self.create_cfn_logical_id(f'TargetGroup{service.name}{lb_idx}'),
                    param_type='String',
                    description='Target Group ARN',
                    value=target_group_ref + '.arn',
                )
                lb['TargetGroupArn'] = troposphere.Ref(tg_param)
                lb_idx += 1

        # Replace TaskDefinition name with a TaskDefinition ARN
        if 'TaskDefinition' in service_dict:
            service_dict['TaskDefinition'] = troposphere.Ref(
                ecs_config.task_definitions[service_dict['TaskDefinition']]._troposphere_res
            )
        service_dict['Cluster'] = troposphere.Ref(cluster_param)
        service_res = troposphere.ecs.Service.from_dict(
            self.create_cfn_logical_id('Service' + service.name),
            service_dict
        )

        # Outputs
        self.create_output(
            title=service_res.title + 'Name',
            description="Service Name",
            value=troposphere.GetAtt(service_res, 'Name'),
            ref=service.paco_ref_parts + ".name"
        )
        self.template.add_resource(service_res)