def processed_document(self):
    """Return the IoT policy document with all variables resolved.

    The resolved document is cached on ``self._document``; subsequent calls
    return the cached value. Resolution happens in two passes:

    1. Variables whose values are Paco references are resolved; references
       that resolve to a Stack are replaced with the Stack's output value.
    2. ``${...}`` placeholders in the policy JSON are substituted with the
       AWS region, account id, or the resolved variable value. Placeholders
       containing ``:`` (e.g. other ``AWS::*`` pseudo parameters) are left
       untouched for CloudFormation to resolve.
    """
    if self._document is not None:
        return self._document

    # Resolve variable references and replace them with the resolved value.
    # ToDo: only looks up Stack output values?
    for key, var in self.iotpolicy.variables.items():
        if references.is_ref(var):
            ref_value = references.resolve_ref(var, self.project)
            if isinstance(ref_value, Stack):
                output_key = ref_value.get_outputs_key_from_ref(
                    references.Reference(var))
                ref_value = ref_value.get_outputs_value(output_key)
            self.iotpolicy.variables[key] = ref_value

    def var_replace(match):
        "Substitute a single ${...} placeholder."
        value = match.groups()[0]
        # Compare case-insensitively against pre-lowered literals instead of
        # calling .lower() on the constant every invocation.
        if value.lower() == 'aws::region':
            return self.aws_region
        elif value.lower() == 'aws::accountid':
            return self.account_ctx.id
        elif ':' in value:
            # Unknown namespaced placeholder: leave it for CloudFormation.
            return "${" + value + "}"
        else:
            return self.iotpolicy.variables[value]

    # Raw string: '\$' is not a valid Python escape and raised a
    # DeprecationWarning as a plain string literal.
    self._document = re.sub(r'\${(.+?)}', var_replace, self.iotpolicy.policy_json)
    return self._document
def create_cfn_ref_list_param(
    self,
    param_type,
    name,
    description,
    value,
    ref_attribute=None,
    default=None,
    noecho=False,
):
    """Create a CloudFormation Parameter from a list of refs.

    Each item in ``value`` that is a Paco reference is resolved to a Stack
    and registered as a stack-output dependency (optionally suffixed with
    ``ref_attribute``, e.g. ``'id'``); plain items are used literally.
    Returns the Parameter created by ``create_cfn_parameter``.

    Raises PacoException if a reference does not resolve to a Stack.
    """
    stack_output_param = StackOutputParam(name, param_template=self)
    for list_item in value:
        if is_ref(list_item):
            if ref_attribute is not None:
                # Narrow the ref to a specific output attribute.
                list_item += '.' + ref_attribute
            stack = self.paco_ctx.get_ref(list_item)
            if not isinstance(stack, Stack):
                raise PacoException(
                    PacoErrorCode.Unknown,
                    message="Reference must resolve to a stack")
            stack_output_key = self.stack.get_stack_outputs_key_from_ref(
                Reference(list_item))
            stack_output_param.add_stack_output(stack, stack_output_key)
        else:
            stack_output_param.add_value(list_item)
    return self.create_cfn_parameter(
        param_type,
        name,
        description,
        stack_output_param,
        default,
        noecho,
    )
def load_app_in_account_region(
    parent,
    account,
    region,
    app_name,
    app_config,
    project=None,
    monitor_config=None,
    read_file_path='not set',
):
    """
    Load an Application from config into an AccountContainer and RegionContainer.
    Account can be a paco.ref but then the Paco Project must be supplied too.

    Creates the AccountContainer/RegionContainer under ``parent`` if they do
    not already exist, then builds the Application and applies its attributes
    from ``app_config``. Returns the new Application.
    """
    account_name = account
    if is_ref(account):
        # Resolve the account ref to its model object's name.
        account_name = get_model_obj_from_ref(account, project).name

    # Lazily create the container hierarchy: account -> region -> app.
    if account_name not in parent:
        account_cont = AccountContainer(account_name, parent)
        parent[account_name] = account_cont
    if region not in parent[account_name]:
        region_cont = RegionContainer(region, parent[account_name])
        parent[account_name][region] = region_cont
    app = Application(app_name, parent[account_name][region])
    parent[account_name][region][app_name] = app

    if project is None:
        # Walk up the model tree to find the owning Project.
        project = get_parent_by_interface(parent)
    apply_attributes_from_config(
        app,
        app_config,
        lookup_config=monitor_config,
        read_file_path=read_file_path,
        resource_registry=project.resource_registry,
    )
    return app
def __init__(
    self,
    stack,
    paco_ctx,
    sns_topic_list
):
    """Build a template granting SNS Topics permission to invoke a Lambda
    and subscribing the Lambda to each topic in ``sns_topic_list``.

    Topic list items may be paco.refs (resolved to a model object to find
    the topic's region) or literal topic ARNs (region parsed from the ARN).
    """
    super().__init__(
        stack,
        paco_ctx,
        iam_capabilities=["CAPABILITY_NAMED_IAM"],
    )
    self.set_aws_name('LambdaSNSSubs', self.resource_group_name, self.resource_name)
    self.awslambda = self.stack.resource
    self.init_template('Lambda SNS Subscriptions')

    # If not enabled finish with only an empty placeholder template.
    if not self.awslambda.is_enabled():
        return

    # The Lambda function that will be subscribed to each topic.
    # (Description fixed: this parameter is the Lambda ARN, not a topic ARN.)
    lambda_arn_param = self.create_cfn_parameter(
        name='LambdaFunctionArn',
        param_type='String',
        description='The Lambda function ARN to subscribe to the SNS Topics.',
        value=self.awslambda.paco_ref + '.arn'
    )

    for idx, sns_topic in enumerate(sns_topic_list, start=1):
        # SNS Topic ARN parameter: either resolve a ref or use a raw ARN.
        if is_ref(sns_topic):
            sns_topic_value = sns_topic + '.arn'
            sns_topic_obj = get_model_obj_from_ref(sns_topic, self.paco_ctx.project)
            region_name = sns_topic_obj.region_name
        else:
            sns_topic_value = sns_topic
            # ARN format: arn:partition:service:region:account:resource
            region_name = sns_topic.split(':')[3]
        param_name = 'SNSTopicArn%d' % idx
        self.create_cfn_parameter(
            name=param_name,
            param_type='String',
            description='An SNS Topic ARN to grant permission to.',
            value=sns_topic_value
        )
        # SNS Topic subscription
        troposphere.sns.SubscriptionResource(
            title=param_name + 'Subscription',
            template=self.template,
            Endpoint=troposphere.Ref(lambda_arn_param),
            Protocol='lambda',
            TopicArn=troposphere.Ref(param_name),
            Region=region_name
        )
def add_record_set(self, account_ctx, region, resource, dns, record_set_type, enabled=True, resource_records=None, alias_dns_name=None, alias_hosted_zone_id=None, stack_group=None, async_stack_provision=False, config_ref=None):
    """Add a Route53RecordSet stack to ``stack_group`` for the given DNS config.

    The stack is provisioned in the account that owns the hosted zone
    (resolved from ``dns.hosted_zone`` when it is a ref), which may differ
    from the account of the resource being recorded.

    Raises PacoException if ``stack_group`` is None (believed dead code).
    """
    record_set_config = {
        'enabled': enabled,
        'dns': dns,
        'alias_dns_name': alias_dns_name,
        'alias_hosted_zone_id': alias_hosted_zone_id,
        'record_set_type': record_set_type,
        'resource_records': resource_records
    }
    if stack_group is None:
        # This case is believed to no longer happen; fail loudly rather
        # than silently creating an orphaned stack group.
        raise PacoException(PacoErrorCode.Unknown)

    stack_account_ctx = account_ctx
    if is_ref(dns.hosted_zone):
        # Provision in the account that owns the hosted zone.
        hosted_zone_obj = get_model_obj_from_ref(
            dns.hosted_zone, self.paco_ctx.project)
        stack_account_ctx = self.paco_ctx.get_account_context(
            account_ref=hosted_zone_obj.account)
    stack_orders = None
    if async_stack_provision:
        # Provision asynchronously; wait for completion at the very end.
        stack_orders = [StackOrder.PROVISION, StackOrder.WAITLAST]
    stack_group.add_new_stack(
        region,
        resource,
        Route53RecordSet,
        account_ctx=stack_account_ctx,
        stack_orders=stack_orders,
        extra_context={
            'record_set_config': record_set_config,
            'record_set_name': dns.domain_name
        })
def __init__(
    self,
    stack,
    paco_ctx,
    record_set_name,
    record_set_config,
):
    """Build a CloudFormation template for a single Route53 RecordSet.

    ``record_set_config`` carries the dns model object, record type,
    resource records and (for Alias records) the alias target values.
    ``record_set_name`` may itself be a paco.ref to a domain name.
    """
    if references.is_ref(record_set_name):
        record_set_name = paco_ctx.get_ref(record_set_name)
    super().__init__(stack, paco_ctx)

    hosted_zone_is_private = self.paco_ctx.get_ref(
        record_set_config['dns'].hosted_zone + '.private_hosted_zone')
    aws_name = 'RecordSet'
    # NOTE(review): kept as '== True' — get_ref's return type is not
    # guaranteed to be a strict bool here.
    if hosted_zone_is_private == True:
        aws_name = aws_name + '-Private'
    self.set_aws_name(aws_name, record_set_name)

    # Troposphere Template Initialization
    self.init_template('Route53 RecordSet: ' + record_set_name)

    # Parameters
    hosted_zone_id = record_set_config['dns'].hosted_zone
    if references.is_ref(record_set_config['dns'].hosted_zone):
        hosted_zone_id = record_set_config['dns'].hosted_zone + '.id'
    hosted_zone_id_param = self.create_cfn_parameter(
        param_type='String',
        name='HostedZoneId',
        description='Record Set Hosted Zone Id',
        value=hosted_zone_id,
    )
    # Route53 has no 'Alias' record type: an Alias is an A record with an
    # AliasTarget property.
    record_set_type = record_set_config['record_set_type']
    if record_set_config['record_set_type'] == 'Alias':
        record_set_type = 'A'
    record_set_dict = {
        'HostedZoneId': troposphere.Ref(hosted_zone_id_param),
        'Name': record_set_name,
        'Type': record_set_type
    }
    if record_set_config['record_set_type'] == "Alias":
        # Alias target parameters
        alias_hosted_zone_id_param = self.create_cfn_parameter(
            param_type='String',
            name='AliasHostedZoneId',
            description='Hosted Zone Id for the A Alias',
            value=record_set_config['alias_hosted_zone_id'],
        )
        alias_dns_name_param = self.create_cfn_parameter(
            param_type='String',
            name='AliasDNSName',
            description='DNS Name for the A Alias',
            value=record_set_config['alias_dns_name'],
        )
        record_set_dict['AliasTarget'] = {
            'DNSName': troposphere.Ref(alias_dns_name_param),
            'HostedZoneId': troposphere.Ref(alias_hosted_zone_id_param)
        }
    else:
        record_set_dict['TTL'] = record_set_config['dns'].ttl
        record_set_dict['ResourceRecords'] = []
        for resource_record in record_set_config['resource_records']:
            # legacy_flag: aim_name_2019_11_28 - hash with aim.ref instead
            # of paco.ref so logical ids stay stable for old projects.
            hash_name = resource_record
            if self.paco_ctx.legacy_flag('aim_name_2019_11_28'):
                hash_name = 'aim' + hash_name[4:]
            record_hash = utils.md5sum(str_data=hash_name)
            resource_record_param = self.create_cfn_parameter(
                param_type='String',
                name='ResourceRecord' + record_hash,
                description='Resource Record: ' + hash_name,
                value=resource_record,
            )
            record_set_dict['ResourceRecords'].append(
                troposphere.Ref(resource_record_param))

    record_set_res = troposphere.route53.RecordSetType.from_dict(
        self.create_cfn_logical_id_join(['RecordSet']),
        record_set_dict)
    self.template.add_resource(record_set_res)
def create_group_rules(self, sg_group_id, sg_name, sg_config, template):
    """Add SecurityGroupIngress/Egress resources for a Security Group's rules.

    For each Ingress/Egress rule in ``sg_config``, builds a troposphere
    resource wired to the group via a Parameter ref. Rule sources and
    destinations may be CIDR blocks (v4 or v6), refs to other security
    groups, or literal security group ids.

    NOTE(review): ``sg_group_id`` is accepted but unused; kept for
    interface compatibility with callers.
    """
    sg_group_config_ref = 'paco.ref ' + '.'.join([self.config_ref, sg_name])
    # Security Group Ingress and Egress rules
    for sg_rule_type in ['Ingress', 'Egress']:
        # Remove Ingress/Egress rules when the group is disabled
        if not sg_config.is_enabled():
            break
        if sg_rule_type == 'Ingress':
            sg_rule_list = sg_config.ingress
            tropo_rule_method = troposphere.ec2.SecurityGroupIngress
        elif sg_rule_type == 'Egress':
            sg_rule_list = sg_config.egress
            tropo_rule_method = troposphere.ec2.SecurityGroupEgress
        else:
            raise StackException(PacoErrorCode.Unknown)

        for sg_rule_config in sg_rule_list:
            rule_dict = {
                'GroupId': self.create_group_param_ref(sg_group_config_ref, template),
                'IpProtocol': str(sg_rule_config.protocol),
                'FromPort': None,
                'ToPort': None,
                'Description': None
            }
            # Rule logical name: short hash keeps the id stable and unique.
            sg_rule_hash = sg_rule_config.obj_hash()[:8].upper()
            rule_name = self.create_cfn_logical_id(
                sg_name + sg_rule_hash + sg_rule_type + sg_rule_config.name)

            # FromPort and ToPort: -1 means an explicit from/to range is used.
            if sg_rule_config.port != -1:
                rule_dict['FromPort'] = str(sg_rule_config.port)
                rule_dict['ToPort'] = str(sg_rule_config.port)
            else:
                rule_dict['FromPort'] = sg_rule_config.from_port
                rule_dict['ToPort'] = sg_rule_config.to_port

            # Description
            if sg_rule_config.description:
                rule_dict['Description'] = sg_rule_config.description
            else:
                rule_dict['Description'] = 'unknown'

            # Source (Ingress) and Destination (Egress)
            if sg_rule_config.cidr_ip != '':
                rule_dict['CidrIp'] = sg_rule_config.cidr_ip
            elif sg_rule_config.cidr_ip_v6 != '':
                rule_dict['CidrIpv6'] = sg_rule_config.cidr_ip_v6
            elif getattr(sg_rule_config, 'source_security_group', '') != '':
                if references.is_ref(sg_rule_config.source_security_group):
                    rule_dict['SourceSecurityGroupId'] = self.create_group_param_ref(
                        sg_rule_config.source_security_group, template)
                else:
                    rule_dict['SourceSecurityGroupId'] = sg_rule_config.source_security_group
            elif getattr(sg_rule_config, 'destination_security_group', '') != '':
                if references.is_ref(sg_rule_config.destination_security_group):
                    rule_dict['DestinationSecurityGroupId'] = self.create_group_param_ref(
                        sg_rule_config.destination_security_group, template)
                else:
                    rule_dict['DestinationSecurityGroupId'] = sg_rule_config.destination_security_group
            else:
                raise StackException(PacoErrorCode.Unknown)

            # SecurityGroup Ingress/Egress resource
            rule_res = tropo_rule_method.from_dict(rule_name, rule_dict)
            template.add_resource(rule_res)
def __init__(self, stack, paco_ctx,):
    """Build the CloudFormation template for an RDS DB Instance.

    Creates the DB Subnet Group, DB Parameter Group (or a Parameter for an
    existing one), an optional Option Group (including SQLServer
    backup/restore support), the DB Instance itself (with master
    credentials either from Secrets Manager or a NoEcho Parameter),
    Outputs, and Route53 record sets for the endpoint.
    """
    rds_config = stack.resource
    config_ref = rds_config.paco_ref_parts
    super().__init__(stack, paco_ctx, iam_capabilities=["CAPABILITY_IAM"])
    self.set_aws_name('RDS', self.resource_group_name, self.resource.name)
    self.init_template('RDS')
    template = self.template
    if not rds_config.is_enabled():
        return
    rds_logical_id = 'PrimaryDBInstance'

    # DB Subnet Group
    db_subnet_id_list_param = self.create_cfn_parameter(
        param_type='List<AWS::EC2::Subnet::Id>',
        name='DBSubnetIdList',
        description='The list of subnet IDs where this database will be provisioned.',
        value=rds_config.segment + '.subnet_id_list',
    )
    db_subnet_group_res = troposphere.rds.DBSubnetGroup(
        title='DBSubnetGroup',
        template=template,
        DBSubnetGroupDescription=troposphere.Ref('AWS::StackName'),
        SubnetIds=troposphere.Ref(db_subnet_id_list_param),
    )

    # DB Parameter Group
    engine_major_version = None
    if rds_config.parameter_group is None:
        # No Parameter Group supplied, create one from the engine vocabulary.
        engine_major_version = '.'.join(rds_config.engine_version.split('.')[0:2])
        param_group_family = gen_vocabulary.rds_engine_versions[rds_config.engine][rds_config.engine_version]['param_group_family']
        dbparametergroup_ref = troposphere.rds.DBParameterGroup(
            "DBParameterGroup",
            template=template,
            Family=param_group_family,
            Description=troposphere.Ref('AWS::StackName')
        )
    else:
        # Use an existing Parameter Group
        dbparametergroup_ref = self.create_cfn_parameter(
            name='DBParameterGroupName',
            param_type='String',
            description='DB Parameter Group Name',
            value=rds_config.parameter_group + '.name',
        )

    # Option Group
    option_group_res = None
    # Hoisted: the same hasattr/None test was previously repeated twice.
    has_restore_bucket = (
        hasattr(rds_config, 'backup_restore_bucket')
        and rds_config.backup_restore_bucket is not None
    )
    if len(rds_config.option_configurations) > 0 or has_restore_bucket:
        option_group_dict = {
            'EngineName': rds_config.engine,
            'MajorEngineVersion': engine_major_version,
            'OptionGroupDescription': troposphere.Ref('AWS::StackName')
        }
        option_config_list = []
        # Bug fix: idx was initialized but never incremented, so more than
        # one ref-valued option setting produced duplicate Parameter names
        # ('OptionsGroupValue0'). It is now a running counter across all
        # option configurations.
        idx = 0
        for option_config in rds_config.option_configurations:
            option_config_dict = {
                'OptionName': option_config.option_name,
            }
            if len(option_config.option_settings) > 0:
                option_config_dict['OptionSettings'] = []
                for option_setting in option_config.option_settings:
                    option_value = option_setting.value
                    if references.is_ref(option_setting.value):
                        # Resolve the ref through a CFN Parameter.
                        option_setting_value_param = self.create_cfn_parameter(
                            name=f'OptionsGroupValue{idx}',
                            param_type='String',
                            description=f'DB Option Settings Value {idx}',
                            value=option_setting.value
                        )
                        option_value = troposphere.Ref(option_setting_value_param)
                        idx += 1
                    option_setting_dict = {
                        'Name': option_setting.name,
                        'Value': option_value
                    }
                    option_config_dict['OptionSettings'].append(option_setting_dict)
            option_config_list.append(option_config_dict)
        if has_restore_bucket:
            option_config_dict = {
                'OptionName': 'SQLSERVER_BACKUP_RESTORE',
                'OptionSettings': []
            }
            # S3 Bucket Arn Param
            backup_restore_bucket_arn_param = self.create_cfn_parameter(
                name='SQLServerBackupRestoreBucketArn',
                param_type='String',
                description=f'DB Option Setting SQLServer Backup Restore Bucket ARN',
                value=f'{rds_config.backup_restore_bucket}.arn'
            )
            # Role assumed by the RDS service to read/write backups in the bucket.
            sqlserver_backup_restore_role = troposphere.iam.Role(
                title='SQLServerBackupRestoreRole',
                template=self.template,
                AssumeRolePolicyDocument=PolicyDocument(
                    Statement=[
                        Statement(
                            Effect=Allow,
                            Action=[Action("sts", "AssumeRole")],
                            Principal=Principal("Service", "rds.amazonaws.com")
                        )
                    ]
                ),
                Policies=[
                    troposphere.iam.Policy(
                        PolicyName="S3BucketAccess",
                        PolicyDocument=Policy(
                            Version='2012-10-17',
                            Statement=[
                                Statement(
                                    Effect=Allow,
                                    Action=[
                                        awacs.s3.ListBucket,
                                        awacs.s3.GetBucketLocation
                                    ],
                                    Resource=[troposphere.Ref(backup_restore_bucket_arn_param)],
                                ),
                                Statement(
                                    Effect=Allow,
                                    Action=[
                                        # 'GetObjectMetaData' is not a standard
                                        # S3 action name in awacs; kept verbatim.
                                        Action('s3', 'GetObjectMetaData'),
                                        awacs.s3.GetObject,
                                        awacs.s3.PutObject,
                                        awacs.s3.ListMultipartUploadParts,
                                        awacs.s3.AbortMultipartUpload
                                    ],
                                    Resource=[troposphere.Sub("${SQLServerBackupRestoreBucketArn}/*")]
                                )
                            ]
                        )
                    )
                ],
                Path="/",
            )
            option_value = troposphere.GetAtt(sqlserver_backup_restore_role, 'Arn')
            option_setting_dict = {
                'Name': 'IAM_ROLE_ARN',
                'Value': option_value
            }
            option_config_dict['OptionSettings'].append(option_setting_dict)
            option_config_list.append(option_config_dict)
        option_group_dict['OptionConfigurations'] = option_config_list
        option_group_res = troposphere.rds.OptionGroup.from_dict(
            'OptionGroup',
            option_group_dict)
        template.add_resource(option_group_res)

    # Security Groups
    sg_param_ref_list = []
    for sg_ref in rds_config.security_groups:
        sg_hash = utils.md5sum(str_data=sg_ref)
        sg_param = self.create_cfn_parameter(
            param_type='AWS::EC2::SecurityGroup::Id',
            name=self.create_cfn_logical_id('SecurityGroup' + sg_hash),
            description='VPC Security Group to attach to the RDS.',
            value=sg_ref + '.id',
        )
        sg_param_ref_list.append(troposphere.Ref(sg_param))

    db_instance_dict = {
        'Engine': rds_config.engine,
        'EngineVersion': rds_config.engine_version,
        'DBInstanceIdentifier': troposphere.Ref('AWS::StackName'),
        'DBInstanceClass': rds_config.db_instance_type,
        'DBSubnetGroupName': troposphere.Ref(db_subnet_group_res),
        'DBParameterGroupName': troposphere.Ref(dbparametergroup_ref),
        'CopyTagsToSnapshot': True,
        'AllowMajorVersionUpgrade': rds_config.allow_major_version_upgrade,
        'AutoMinorVersionUpgrade': rds_config.auto_minor_version_upgrade,
        'AllocatedStorage': rds_config.storage_size_gb,
        'StorageType': rds_config.storage_type,
        'BackupRetentionPeriod': rds_config.backup_retention_period,
        'Port': rds_config.port,
        'PreferredBackupWindow': rds_config.backup_preferred_window,
        'PreferredMaintenanceWindow': rds_config.maintenance_preferred_window,
        'VPCSecurityGroups': sg_param_ref_list
    }

    # RDS MultiAZ (Mysql, Postgresql)
    if schemas.IRDSMultiAZ.providedBy(rds_config):
        db_instance_dict['MultiAZ'] = rds_config.multi_az
    # License Model
    if rds_config.license_model:
        db_instance_dict['LicenseModel'] = rds_config.license_model
    # Deletion Protection
    if rds_config.deletion_protection:
        db_instance_dict['DeletionProtection'] = rds_config.deletion_protection
    # CloudWatch Logs Exports
    if len(rds_config.cloudwatch_logs_exports) > 0:
        db_instance_dict['EnableCloudwatchLogsExports'] = rds_config.cloudwatch_logs_exports
    # Option Group
    if option_group_res is not None:
        db_instance_dict['OptionGroupName'] = troposphere.Ref(option_group_res)

    # DB Snapshot Identifier ('' and None both mean "no snapshot")
    db_snapshot_id_enabled = bool(rds_config.db_snapshot_identifier)
    if db_snapshot_id_enabled:
        db_instance_dict['DBSnapshotIdentifier'] = rds_config.db_snapshot_identifier
        # To restore an existing DB from a Snapshot, RDS will need to replace the RDS
        # resource, in which case the DBInstanceIdentifier name CAN NOT be set
        # del db_instance_dict['DBInstanceIdentifier']

    # Username and Password: only when not restoring from a snapshot.
    if not db_snapshot_id_enabled:
        # Encryption
        if rds_config.storage_encrypted == True:
            db_instance_dict['StorageEncrypted'] = True
        if rds_config.kms_key_id and rds_config.kms_key_id != '':
            db_instance_dict['KmsKeyId'] = rds_config.kms_key_id
        # Username & Password
        db_instance_dict['MasterUsername'] = rds_config.master_username
        if rds_config.secrets_password:
            # Password from Secrets Manager
            sta_logical_id = 'SecretTargetAttachmentRDS'
            secret_arn_param = self.create_cfn_parameter(
                param_type='String',
                name='RDSSecretARN',
                description='The ARN for the secret for the RDS master password.',
                value=rds_config.secrets_password + '.arn',
            )
            secret_target_attachment_resource = troposphere.secretsmanager.SecretTargetAttachment(
                title=sta_logical_id,
                SecretId=troposphere.Ref(secret_arn_param),
                TargetId=troposphere.Ref(rds_logical_id),
                TargetType='AWS::RDS::DBInstance'
            )
            template.add_resource(secret_target_attachment_resource)
            # Dynamic reference resolved by CloudFormation at deploy time.
            db_instance_dict['MasterUserPassword'] = troposphere.Join(
                '',
                ['{{resolve:secretsmanager:', troposphere.Ref(secret_arn_param), ':SecretString:password}}']
            )
        else:
            master_password_param = self.create_cfn_parameter(
                param_type='String',
                name='MasterUserPassword',
                description='The master user password.',
                value=rds_config.master_user_password,
                noecho=True,
            )
            db_instance_dict['MasterUserPassword'] = troposphere.Ref(master_password_param)

    db_instance_res = troposphere.rds.DBInstance.from_dict(
        rds_logical_id,
        db_instance_dict)
    template.add_resource(db_instance_res)

    # Outputs
    self.create_output(
        title='DBInstanceName',
        description='DB Instance Name',
        value=troposphere.Ref(db_instance_res),
        ref=config_ref + ".name",
    )
    self.create_output(
        title='RDSEndpointAddress',
        description='RDS Endpoint URL',
        value=troposphere.GetAtt(db_instance_res, 'Endpoint.Address'),
        ref=config_ref + ".endpoint.address",
    )

    # Legacy Route53 Record Set
    if self.paco_ctx.legacy_flag('route53_record_set_2019_10_16') == True:
        if rds_config.is_dns_enabled() == True:
            for dns_config in rds_config.dns_config:
                # NOTE(review): hash uses rds_config.hosted_zone/domain_name
                # while the resources use primary_hosted_zone/
                # primary_domain_name — presumably intentional for legacy
                # logical-id stability; verify before changing.
                dns_hash = utils.md5sum(str_data=(rds_config.hosted_zone + rds_config.domain_name))
                primary_hosted_zone_id_param = self.create_cfn_parameter(
                    param_type='String',
                    name='DNSHostedZoneId' + dns_hash,
                    description='The hosted zone id to create the Route53 record set.',
                    value=rds_config.primary_hosted_zone + '.id',
                )
                record_set_res = troposphere.route53.RecordSetType(
                    title='RecordSet' + dns_hash,
                    template=template,
                    Comment='RDS Primary DNS',
                    HostedZoneId=troposphere.Ref(primary_hosted_zone_id_param),
                    Name=rds_config.primary_domain_name,
                    Type='CNAME',
                    TTL=dns_config.ttl,
                    ResourceRecords=[troposphere.GetAtt(db_instance_res, 'Endpoint.Address')]
                )
                record_set_res.DependsOn = db_instance_res

    # DNS - Route53 Record Set
    if self.paco_ctx.legacy_flag('route53_record_set_2019_10_16') == False:
        if rds_config.is_dns_enabled() == True:
            route53_ctl = self.paco_ctx.get_controller('route53')
            for dns_config in rds_config.dns:
                route53_ctl.add_record_set(
                    self.account_ctx,
                    self.aws_region,
                    rds_config,
                    enabled=rds_config.is_enabled(),
                    dns=dns_config,
                    record_set_type='CNAME',
                    resource_records=['paco.ref ' + config_ref + '.endpoint.address'],
                    stack_group=self.stack.stack_group,
                    async_stack_provision=True,
                    config_ref=rds_config.paco_ref_parts + '.dns'
                )
def __init__(
    self,
    stack,
    paco_ctx,
    role_profile_arn,
    ec2_manager_user_data_script,
    ec2_manager_cache_id
):
    """Build the CloudFormation template for an AutoScalingGroup.

    Creates a LaunchConfiguration (AMI, keypair, block devices, user data,
    instance profile, cfn-init metadata) and an AutoScalingGroup with its
    subnets, load balancers / target groups, metrics collection, tags
    (EIP / EFS / EBS / ECS wiring), rolling-update policy, scaling
    policies with alarms, lifecycle hooks, and SSM patch-manager
    association.

    role_profile_arn -- IAM instance profile ARN or None.
    ec2_manager_user_data_script -- script prepended to the resource's own
        user_data_script, or None.
    ec2_manager_cache_id -- cache-buster included in the template description.
    """
    self.asg_config = asg_config = stack.resource
    asg_config_ref = asg_config.paco_ref_parts
    self.ec2_manager_cache_id = ec2_manager_cache_id
    super().__init__(stack, paco_ctx, iam_capabilities=["CAPABILITY_NAMED_IAM"])
    self.set_aws_name('ASG', self.resource_group_name, self.resource_name)
    self.instance_iam_role_name = self.paco_ctx.get_ref(asg_config.paco_ref + '.instance_iam_role.name')

    # Troposphere
    self.init_template('AutoScalingGroup: ' + self.ec2_manager_cache_id)
    template = self.template
    if self.asg_config.is_enabled() == False:
        return

    # InstanceAMI Parameter is preserved in disabled templates so it can be smoothly disabled/enabled
    if self.asg_config.instance_ami_ignore_changes:
        ignore_changes = True
    else:
        ignore_changes = False
    instance_ami_param = self.create_cfn_parameter(
        param_type='String',
        name='InstanceAMI',
        description='The Amazon Machine Image Id to launch instances with.',
        value=asg_config.instance_ami,
        ignore_changes=ignore_changes,
    )

    # if the network for the ASG is disabled, only use an empty placeholder
    if not self.asg_config.env_region_obj.network.is_enabled():
        return

    security_group_list_param = self.create_cfn_ref_list_param(
        param_type='List<AWS::EC2::SecurityGroup::Id>',
        name='SecurityGroupList',
        description='List of security group ids to attach to the ASG instances.',
        value=asg_config.security_groups,
        ref_attribute='id',
    )
    launch_config_dict = {
        'AssociatePublicIpAddress': asg_config.associate_public_ip_address,
        'EbsOptimized': asg_config.ebs_optimized,
        'ImageId': troposphere.Ref(instance_ami_param),
        'InstanceMonitoring': asg_config.instance_monitoring,
        'InstanceType': asg_config.instance_type,
        'SecurityGroups': troposphere.Ref(security_group_list_param),
    }
    if asg_config.instance_key_pair != None:
        instance_key_pair_param = self.create_cfn_parameter(
            param_type='String',
            name='InstanceKeyPair',
            description='The EC2 SSH KeyPair to assign each ASG instance.',
            value=asg_config.instance_key_pair+'.keypair_name',
        )
        launch_config_dict['KeyName'] = troposphere.Ref(instance_key_pair_param)

    # BlockDeviceMappings
    if len(asg_config.block_device_mappings) > 0:
        mappings = []
        for bdm in asg_config.block_device_mappings:
            mappings.append(
                bdm.cfn_export_dict
            )
        launch_config_dict["BlockDeviceMappings"] = mappings

    # User data: EC2 manager script (if any) + the resource's own script,
    # base64-encoded and passed through a Parameter.
    user_data_script = ''
    if ec2_manager_user_data_script != None:
        user_data_script += ec2_manager_user_data_script
    if asg_config.user_data_script != '':
        # strip the shebang: the combined script already starts with one
        user_data_script += asg_config.user_data_script.replace('#!/bin/bash', '')
    if user_data_script != '':
        user_data_64 = base64.b64encode(user_data_script.encode('ascii'))
        user_data_script_param = self.create_cfn_parameter(
            param_type='String',
            name='UserDataScript',
            description='User data script to run at instance launch.',
            value=user_data_64.decode('ascii'),
        )
        launch_config_dict['UserData'] = troposphere.Ref(user_data_script_param)

    if role_profile_arn != None:
        launch_config_dict['IamInstanceProfile'] = role_profile_arn

    # CloudFormation Init
    if asg_config.cfn_init and asg_config.is_enabled():
        launch_config_dict['Metadata'] = troposphere.autoscaling.Metadata(
            asg_config.cfn_init.export_as_troposphere()
        )
        for key, value in asg_config.cfn_init.parameters.items():
            if type(value) == type(str()):
                param_type = 'String'
            elif type(value) == type(int()) or type(value) == type(float()):
                param_type = 'Number'
            else:
                raise UnsupportedCloudFormationParameterType(
                    "Can not cast {} of type {} to a CloudFormation Parameter type.".format(
                        value, type(value)
                    )
                )
            # registered with the template as a side effect of creation
            cfn_init_param = self.create_cfn_parameter(
                param_type=param_type,
                name=key,
                description='CloudFormation Init Parameter {} for ASG {}'.format(key, asg_config.name),
                value=value,
            )

    # Launch Configuration resource
    launch_config_res = troposphere.autoscaling.LaunchConfiguration.from_dict(
        'LaunchConfiguration',
        launch_config_dict
    )
    template.add_resource(launch_config_res)

    # Subnets: all AZs in the segment, or a single AZ's subnet.
    subnet_list_ref = asg_config.env_region_obj.network.vpc.segments[asg_config.segment].paco_ref
    if asg_config.availability_zone == 'all':
        subnet_list_ref += '.subnet_id_list'
    else:
        subnet_list_ref += '.az{}.subnet_id'.format(asg_config.availability_zone)

    asg_subnet_list_param = self.create_cfn_parameter(
        param_type='List<AWS::EC2::Subnet::Id>',
        name='ASGSubnetList',
        description='A list of subnets where the ASG will launch instances',
        value=subnet_list_ref
    )

    # Disabled ASGs keep the template but scale to zero.
    min_instances = asg_config.min_instances if asg_config.is_enabled() else 0
    desired_capacity = asg_config.desired_capacity if asg_config.is_enabled() else 0
    desired_capacity_param = self.create_cfn_parameter(
        param_type='String',
        name='DesiredCapacity',
        description='The desired capacity of instances to run in the ASG.',
        value=desired_capacity,
        ignore_changes=self.asg_config.desired_capacity_ignore_changes,
    )

    asg_dict = {
        'AutoScalingGroupName': asg_config.get_aws_name(),
        'DesiredCapacity': troposphere.Ref(desired_capacity_param),
        'HealthCheckGracePeriod': asg_config.health_check_grace_period_secs,
        'LaunchConfigurationName': troposphere.Ref(launch_config_res),
        'MaxSize': asg_config.max_instances,
        'MinSize': min_instances,
        'Cooldown': asg_config.cooldown_secs,
        'HealthCheckType': asg_config.health_check_type,
        'TerminationPolicies': asg_config.termination_policies,
        'VPCZoneIdentifier': troposphere.Ref(asg_subnet_list_param),
    }

    if asg_config.load_balancers != None and len(asg_config.load_balancers) > 0:
        load_balancer_names_param = self.create_cfn_ref_list_param(
            param_type='List<String>',
            name='LoadBalancerNames',
            description='A list of load balancer names to attach to the ASG',
            value=asg_config.load_balancers,
        )
        asg_dict['LoadBalancerNames'] = troposphere.Ref(load_balancer_names_param)

    if asg_config.is_enabled() and asg_config.disable_target_groups == False:
        if asg_config.target_groups != None and len(asg_config.target_groups) > 0:
            asg_dict['TargetGroupARNs'] = []
            for target_group_arn in asg_config.target_groups:
                target_group_arn_param = self.create_cfn_parameter(
                    param_type='String',
                    name='TargetGroupARNs'+utils.md5sum(str_data=target_group_arn),
                    description='A Target Group ARNs to attach to the ASG',
                    value=target_group_arn+'.arn',
                )
                asg_dict['TargetGroupARNs'].append(troposphere.Ref(target_group_arn_param))

    if asg_config.monitoring != None and \
            asg_config.monitoring.is_enabled() == True and \
            len(asg_config.monitoring.asg_metrics) > 0:
        asg_dict['MetricsCollection'] = [{
            'Granularity': '1Minute',
            'Metrics': asg_config.monitoring.asg_metrics
        }]

    # ASG Tags
    asg_dict['Tags'] = [
        troposphere.autoscaling.Tag('Name', asg_dict['AutoScalingGroupName'], True)
    ]

    # TODO: DNS: To be enabled once cftemplates/iam_managed_policies.py
    # is ported to troposphere
    # if len(asg_config.dns) > 0 and asg_config.is_enabled():
    #     idx = 0
    #     for dns_config in asg_config.dns:
    #         if references.is_ref(dns_config.hosted_zone):
    #             hosted_zone_value = dns_config.hosted_zone+'.id'
    #         else:
    #             hosted_zone_value = dns_config.hosted_zone
    #         dns_hosted_zone_param = self.create_cfn_parameter(
    #             param_type='String',
    #             name=f'DNSHostedZone{idx}',
    #             description=f'DNS Hosted Zone for index {idx}',
    #             value=dns_value
    #         )
    #         asg_dict['Tags'].append(
    #             troposphere.autoscaling.Tag(f'Paco-DNS-Hosted-Zone-{idx}', troposphere.Ref(dns_hosted_zone_param), True)
    #         )
    #         dns_domain_param = self.create_cfn_parameter(
    #             param_type='String',
    #             name=f'DNSDomain{idx}',
    #             description=f'DNS Domain name for index {idx}',
    #             value=dns_value
    #         )
    #         asg_dict['Tags'].append(
    #             troposphere.autoscaling.Tag(f'Paco-DNS-Domain-{idx}', troposphere.Ref(dns_domain_param), True)
    #         )
    #         idx += 1

    # EIP: tagged so instance tooling can attach the allocation at boot.
    if asg_config.eip != None and asg_config.is_enabled():
        if references.is_ref(asg_config.eip) == True:
            eip_value = asg_config.eip + '.allocation_id'
        else:
            eip_value = asg_config.eip
        eip_id_param = self.create_cfn_parameter(
            param_type='String',
            name='EIPAllocationId',
            description='The allocation Id of the EIP to attach to the instance.',
            value=eip_value,
        )
        asg_dict['Tags'].append(
            troposphere.autoscaling.Tag('Paco-EIP-Allocation-Id', troposphere.Ref(eip_id_param), True)
        )

    # EFS FileSystemId Tags
    if asg_config.is_enabled():
        for efs_mount in asg_config.efs_mounts:
            target_hash = utils.md5sum(str_data=efs_mount.target)
            if references.is_ref(efs_mount.target) == True:
                efs_value = efs_mount.target + '.id'
            else:
                efs_value = efs_mount.target
            efs_id_param = self.create_cfn_parameter(
                param_type='String',
                name='EFSId'+target_hash,
                description='EFS Id',
                value=efs_value,
            )
            asg_tag = troposphere.autoscaling.Tag(
                'efs-id-' + target_hash,
                troposphere.Ref(efs_id_param),
                True
            )
            asg_dict['Tags'].append(asg_tag)

        # EBS Volume Id and Device name Tags
        for ebs_volume_mount in asg_config.ebs_volume_mounts:
            if ebs_volume_mount.is_enabled() == False:
                continue
            volume_hash = utils.md5sum(str_data=ebs_volume_mount.volume)
            if references.is_ref(ebs_volume_mount.volume) == True:
                ebs_volume_id_value = ebs_volume_mount.volume + '.id'
            else:
                ebs_volume_id_value = ebs_volume_mount.volume
            # Volume Id
            ebs_volume_id_param = self.create_cfn_parameter(
                param_type='String',
                name='EBSVolumeId'+volume_hash,
                description='EBS Volume Id',
                value=ebs_volume_id_value
            )
            ebs_volume_id_tag = troposphere.autoscaling.Tag(
                'ebs-volume-id-' + volume_hash,
                troposphere.Ref(ebs_volume_id_param),
                True
            )
            asg_dict['Tags'].append(ebs_volume_id_tag)
            #ebs_device_param = self.create_cfn_parameter(
            #    param_type='String',
            #    name='EBSDevice'+volume_hash,
            #    description='EBS Device Name',
            #    value=ebs_volume_mount.device,
            #)
            #ebs_device_tag = troposphere.autoscaling.Tag(
            #    'ebs-device-' + volume_hash,
            #    troposphere.Ref(ebs_device_param),
            #    True
            #)
            #asg_dict['Tags'].append(ebs_device_tag)

    # ECS Cluster Configuration
    if asg_config.is_enabled() and asg_config.ecs != None:
        ecs_cluster_name_param = self.create_cfn_parameter(
            param_type='String',
            name='ECSClusterName',
            description='ECS Cluster Name',
            value=asg_config.ecs.cluster + '.name'
        )
        asg_tag = troposphere.autoscaling.Tag(
            'Paco-ECSCluster-Name',
            troposphere.Ref(ecs_cluster_name_param),
            True
        )
        asg_dict['Tags'].append(asg_tag)
        # ECS Cluster Capacity Manager requires NewInstancesProtectedFromScaleIn
        # to be enabled if it is going to manage instance protection
        if asg_config.ecs.capacity_provider != None and asg_config.ecs.capacity_provider.is_enabled():
            if asg_config.ecs.capacity_provider.managed_instance_protection == True:
                asg_dict['NewInstancesProtectedFromScaleIn'] = True

    # ECS Release Phase Configuration
    policy_statements = []
    if asg_config.script_manager:
        if asg_config.script_manager.ecr_deploy:
            self.script_manager_ecr_deploy(asg_config.script_manager.ecr_deploy, asg_dict, asg_config, template)
        if asg_config.script_manager.ecs:
            self.script_manager_ecs(asg_config.script_manager.ecs, asg_dict, asg_config, template)

    # ECR Repository access
    self.set_ecr_repositories_statements(
        asg_config.ecr,
        template,
        'ECRAccess',
        [self.instance_iam_role_name]
    )

    asg_res = troposphere.autoscaling.AutoScalingGroup.from_dict(
        'ASG',
        asg_dict
    )
    template.add_resource(asg_res)
    asg_res.DependsOn = launch_config_res

    # only create an UpdatePolicy if it is enabled
    # NOTE(review): assumes rolling_update_policy is never None — confirm
    # the model always provides it.
    update_policy = asg_config.rolling_update_policy
    if update_policy.enabled == True:
        if update_policy.pause_time == '' and update_policy.wait_on_resource_signals == True:
            # if wait_on_resource_signals is true the default pause time is 5 minutes
            update_policy.pause_time = 'PT5M'
        elif update_policy.pause_time == '':
            update_policy.pause_time = 'PT0S'

        min_instances_in_service_param = self.create_cfn_parameter(
            param_type='String',
            name='MinInstancesInService',
            description='Rolling update minimum instances to remain in service during update.',
            value=update_policy.min_instances_in_service
        )
        # UpdatePolicy properties
        asg_res.UpdatePolicy = troposphere.policies.UpdatePolicy(
            AutoScalingRollingUpdate=troposphere.policies.AutoScalingRollingUpdate(
                MaxBatchSize=update_policy.max_batch_size,
                MinInstancesInService=troposphere.Ref(min_instances_in_service_param),
                PauseTime=update_policy.pause_time,
                WaitOnResourceSignals=update_policy.wait_on_resource_signals,
                SuspendProcesses=[
                    'HealthCheck',
                    'ReplaceUnhealthy',
                    'AlarmNotification',
                    'ScheduledActions'
                ]
            )
        )

    self.create_output(
        title='ASGName',
        value=troposphere.Ref(asg_res),
        description='Auto Scaling Group Name',
        ref=[asg_config_ref, asg_config_ref+'.name']
    )

    # CPU Scaling Policy
    if asg_config.scaling_policy_cpu_average > 0:
        troposphere.autoscaling.ScalingPolicy(
            title='CPUAverageScalingPolicy',
            template=template,
            AutoScalingGroupName=troposphere.Ref(asg_res),
            PolicyType='TargetTrackingScaling',
            TargetTrackingConfiguration=troposphere.autoscaling.TargetTrackingConfiguration(
                PredefinedMetricSpecification=troposphere.autoscaling.PredefinedMetricSpecification(
                    PredefinedMetricType='ASGAverageCPUUtilization'
                ),
                TargetValue=float(asg_config.scaling_policy_cpu_average)
            )
        )

    # Custom scaling policies, each with its own CloudWatch alarms.
    if asg_config.scaling_policies != None:
        for scaling_policy_name in asg_config.scaling_policies.keys():
            scaling_policy = asg_config.scaling_policies[scaling_policy_name]
            if scaling_policy.is_enabled() == False:
                continue
            scaling_policy_res = troposphere.autoscaling.ScalingPolicy(
                title=self.create_cfn_logical_id_join(
                    ['ScalingPolicy', scaling_policy_name],
                    camel_case=True
                ),
                template=template,
                AdjustmentType=scaling_policy.adjustment_type,
                AutoScalingGroupName=troposphere.Ref(asg_res),
                PolicyType=scaling_policy.policy_type,
                ScalingAdjustment=scaling_policy.scaling_adjustment,
                Cooldown=scaling_policy.cooldown
            )
            alarm_idx = 0
            for alarm in scaling_policy.alarms:
                dimension_list = []
                for dimension in alarm.dimensions:
                    dimension_value = dimension.value
                    if dimension.name == 'AutoScalingGroupName' and references.is_ref(dimension.value):
                        # Reference the local ASG if the ref points here
                        dimension_ref = Reference(dimension.value)
                        if dimension_ref.ref == self.config_ref:
                            dimension_value = troposphere.Ref(asg_res)
                    dimension_res = troposphere.cloudwatch.MetricDimension(
                        Name=dimension.name,
                        Value=dimension_value
                    )
                    dimension_list.append(dimension_res)

                if len(dimension_list) == 0:
                    dimension_list = troposphere.Ref('AWS::NoValue')

                # Alarm Resource
                troposphere.cloudwatch.Alarm(
                    title=self.create_cfn_logical_id_join(
                        ['ScalingPolicyAlarm', scaling_policy_name, str(alarm_idx)],
                        camel_case=True
                    ),
                    template=template,
                    ActionsEnabled=True,
                    AlarmActions=[troposphere.Ref(scaling_policy_res)],
                    AlarmDescription=alarm.alarm_description,
                    ComparisonOperator=alarm.comparison_operator,
                    MetricName=alarm.metric_name,
                    Namespace=alarm.namespace,
                    Period=alarm.period,
                    Threshold=alarm.threshold,
                    EvaluationPeriods=alarm.evaluation_periods,
                    Statistic=alarm.statistic,
                    Dimensions=dimension_list
                )
                alarm_idx += 1

    if asg_config.lifecycle_hooks != None:
        for lifecycle_hook_name in asg_config.lifecycle_hooks:
            lifecycle_hook = asg_config.lifecycle_hooks[lifecycle_hook_name]
            if lifecycle_hook.is_enabled() == False:
                continue
            troposphere.autoscaling.LifecycleHook(
                title = self.create_cfn_logical_id_join(
                    ['LifecycleHook', lifecycle_hook_name],
                    camel_case=True
                ),
                template=template,
                AutoScalingGroupName=troposphere.Ref(asg_res),
                DefaultResult=lifecycle_hook.default_result,
                LifecycleTransition=lifecycle_hook.lifecycle_transition,
                RoleARN=lifecycle_hook.role_arn,
                NotificationTargetARN=lifecycle_hook.notification_target_arn
            )

    # SSM patch manager association, targeted by the ASG's Name tag.
    if asg_config.patch_manager != None and asg_config.patch_manager.is_enabled():
        patch_ssm_associate_dict = {
            'AssociationName': f'OpusPatchBaseline{asg_config.patch_manager.operation}',
            'Name': 'AWS-RunPatchBaseline',
            'ScheduleExpression': asg_config.patch_manager.schedule_expression,
            'Targets': [{
                'Key': 'tag:Name',
                'Values': [asg_config.get_aws_name()]
            }],
            'Parameters': {
                'Operation': [asg_config.patch_manager.operation]
            },
            'WaitForSuccessTimeoutSeconds': 900
        }
        patch_ssm_associate_res = troposphere.ssm.Association.from_dict(
            'PatchAssociation', patch_ssm_associate_dict
        )
        template.add_resource(patch_ssm_associate_res)
def __init__(self, stack, paco_ctx, factory_name):
    """Build a CloudFormation template for a CloudFront Distribution.

    Creates the Distribution with its default cache behavior, optional extra
    cache behaviors, viewer certificate, Lambda@Edge associations, domain
    aliases, origins (custom or S3 with Origin Access Identity), custom error
    responses, WAF WebACL, and Route53 alias record sets. Registers stack
    outputs for the distribution URL and id.
    """
    cloudfront_config = stack.resource
    config_ref = stack.stack_ref
    super().__init__(stack, paco_ctx)
    self.set_aws_name('CloudFront', self.resource_group_name, self.resource_name, factory_name)
    # set True when any S3 origin uses an Origin Access Identity; used at the
    # end to mark the stack as requiring a wait on delete
    origin_access_id_enabled = False
    self.init_template('CloudFront Distribution')
    template = self.template
    target_origin_param = self.create_cfn_parameter(
        param_type='String',
        name='TargetOrigin',
        description='Target Origin',
        value=cloudfront_config.default_cache_behavior.target_origin,
    )
    # base DistributionConfig; optional sections are added below
    distribution_config_dict = {
        'Enabled': cloudfront_config.is_enabled(),
        'DefaultRootObject': cloudfront_config.default_root_object,
        'HttpVersion': 'http2',
        'DefaultCacheBehavior': {
            'AllowedMethods': cloudfront_config.default_cache_behavior.allowed_methods,
            'DefaultTTL': cloudfront_config.default_cache_behavior.default_ttl,
            'TargetOriginId': troposphere.Ref(target_origin_param),
            'ViewerProtocolPolicy': cloudfront_config.default_cache_behavior.viewer_protocol_policy
        },
        'PriceClass': 'PriceClass_' + cloudfront_config.price_class
    }
    if cloudfront_config.is_enabled() == True:
        # force the certificate to be in us-east-1, as that's the only CloudFront region
        if cloudfront_config.viewer_certificate.certificate != None:
            certificate = get_model_obj_from_ref(
                cloudfront_config.viewer_certificate.certificate,
                self.paco_ctx.project)
            if certificate.region != 'us-east-1':
                # NOTE(review): 'Certficate' typo in this user-facing message;
                # left as-is here since it is runtime output text.
                raise InvalidCloudFrontCertificateRegion(
                    f'Certficate region is: {certificate.region}: {certificate.paco_ref}'
                )
            viewer_certificate_param = self.create_cfn_parameter(
                name='ViewerCertificateArn',
                description="ACM Viewer Certificate ARN",
                param_type='String',
                value=cloudfront_config.viewer_certificate.certificate + '.arn',
            )
            distribution_config_dict['ViewerCertificate'] = {
                'AcmCertificateArn': troposphere.Ref(viewer_certificate_param),
                'SslSupportMethod': cloudfront_config.viewer_certificate.ssl_supported_method,
                'MinimumProtocolVersion': cloudfront_config.viewer_certificate.minimum_protocol_version
            }
        # -1 is the sentinel for "not set" on TTL overrides
        if cloudfront_config.default_cache_behavior.min_ttl != -1:
            distribution_config_dict['DefaultCacheBehavior']['MinTTL'] = cloudfront_config.default_cache_behavior.min_ttl
        if cloudfront_config.default_cache_behavior.max_ttl != -1:
            distribution_config_dict['DefaultCacheBehavior']['MaxTTL'] = cloudfront_config.default_cache_behavior.max_ttl

        # Lambda Function Association Parameters - for both DefaultCacheBehaviour and CacheBehaviours
        lambda_associations = []
        lambda_params = {}
        associations = cloudfront_config.default_cache_behavior.lambda_function_associations[:]
        for cache_behaviour in cloudfront_config.cache_behaviors:
            for lambda_association in cache_behaviour.lambda_function_associations:
                associations.append(lambda_association)
        for lambda_association in associations:
            lambda_ref = lambda_association.lambda_function
            if lambda_ref not in lambda_params:
                # NOTE(review): a Parameter is only created for refs ending in
                # '.autoversion.arn'; other refs would KeyError in the lookups
                # below — presumably model validation guarantees this suffix.
                # TODO confirm.
                if lambda_ref.endswith('.autoversion.arn'):
                    lambda_name = self.create_cfn_logical_id(
                        'Lambda' + utils.md5sum(str_data=lambda_ref))
                    lambda_params[lambda_ref] = self.create_cfn_parameter(
                        param_type='String',
                        name=lambda_name,
                        description=f'Lambda Function Associated for {lambda_ref}',
                        value=lambda_ref,
                    )
        # Lambda Function Association for DefaultCacheBehavior
        for lambda_association in cloudfront_config.default_cache_behavior.lambda_function_associations:
            lambda_associations.append({
                'EventType': lambda_association.event_type,
                'IncludeBody': lambda_association.include_body,
                'LambdaFunctionARN': troposphere.Ref(
                    lambda_params[lambda_association.lambda_function]),
            })
        if len(lambda_associations) > 0:
            # ToDo: PR this monkey-patch into Troposphere
            from troposphere.validators import boolean
            troposphere.cloudfront.LambdaFunctionAssociation.props[
                'IncludeBody'] = (boolean, False)
            distribution_config_dict['DefaultCacheBehavior'][
                'LambdaFunctionAssociations'] = lambda_associations

        # Domain Aliases and Record Sets
        aliases_list = []
        aliases_param_map = {}
        for alias in cloudfront_config.domain_aliases:
            alias_hash = utils.md5sum(str_data=alias.domain_name)
            domain_name_param = 'DomainAlias' + alias_hash
            alias_param = self.create_cfn_parameter(
                param_type='String',
                name=domain_name_param,
                description='Domain Alias CNAME',
                value=alias.domain_name)
            aliases_list.append(troposphere.Ref(alias_param))
            aliases_param_map[alias.domain_name] = alias_param
        distribution_config_dict['Aliases'] = aliases_list

        # DefaultCacheBehavior
        # Forward Values
        if cloudfront_config.default_cache_behavior.origin_request_policy_id != None:
            distribution_config_dict['DefaultCacheBehavior'][
                'OriginRequestPolicyId'] = cloudfront_config.default_cache_behavior.origin_request_policy_id
        # CachePolicyId and ForwardedValues are mutually exclusive in CloudFront
        if cloudfront_config.default_cache_behavior.cache_policy_id != None:
            distribution_config_dict['DefaultCacheBehavior'][
                'CachePolicyId'] = cloudfront_config.default_cache_behavior.cache_policy_id
        else:
            forwarded_values_config = cloudfront_config.default_cache_behavior.forwarded_values
            forwarded_values_dict = {
                'Cookies': {
                    'Forward': 'none',
                },
                'QueryString': str(forwarded_values_config.query_string)
            }
            # Cookies: S3 origins keep the 'none' default
            if cloudfront_config.s3_origin_exists() == False:
                forwarded_values_dict['Cookies'][
                    'Forward'] = forwarded_values_config.cookies.forward
            if len(forwarded_values_config.cookies.whitelisted_names) > 0:
                forwarded_values_dict['Cookies'][
                    'WhitelistedNames'] = forwarded_values_config.cookies.whitelisted_names
            # Headers
            if cloudfront_config.s3_origin_exists() == False:
                forwarded_values_dict[
                    'Headers'] = cloudfront_config.default_cache_behavior.forwarded_values.headers
            distribution_config_dict['DefaultCacheBehavior'][
                'ForwardedValues'] = forwarded_values_dict

        # Cache Behaviors
        if len(cloudfront_config.cache_behaviors) > 0:
            cache_behaviors_list = []
            # de-duplicate TargetOrigin Parameters by md5 of the origin ref
            target_origin_param_map = {}
            for cache_behavior in cloudfront_config.cache_behaviors:
                target_origin_hash = utils.md5sum(
                    str_data=cache_behavior.target_origin)
                if target_origin_hash not in target_origin_param_map.keys():
                    cb_target_origin_param = self.create_cfn_parameter(
                        param_type='String',
                        name=self.create_cfn_logical_id(
                            'TargetOriginCacheBehavior' + target_origin_hash),
                        description='Target Origin',
                        value=cache_behavior.target_origin,
                    )
                    target_origin_param_map[
                        target_origin_hash] = cb_target_origin_param
                else:
                    cb_target_origin_param = target_origin_param_map[
                        target_origin_hash]
                cache_behavior_dict = {
                    'PathPattern': cache_behavior.path_pattern,
                    'AllowedMethods': cache_behavior.allowed_methods,
                    'DefaultTTL': cache_behavior.default_ttl,
                    'TargetOriginId': troposphere.Ref(cb_target_origin_param),
                    'ViewerProtocolPolicy': cache_behavior.viewer_protocol_policy
                }
                # CacheBehavior Lambda Function Associations
                if len(cache_behavior.lambda_function_associations) > 0:
                    lambda_associations = []
                    for lambda_association in cache_behavior.lambda_function_associations:
                        lambda_associations.append({
                            'EventType': lambda_association.event_type,
                            'IncludeBody': lambda_association.include_body,
                            'LambdaFunctionARN': troposphere.Ref(lambda_params[
                                lambda_association.lambda_function]),
                        })
                    cache_behavior_dict[
                        'LambdaFunctionAssociations'] = lambda_associations
                # CachePolicyId or ForwardedValues, not both
                if cache_behavior.origin_request_policy_id != None:
                    cache_behavior_dict[
                        'OriginRequestPolicyId'] = cache_behavior.origin_request_policy_id
                if cache_behavior.cache_policy_id != None:
                    cache_behavior_dict[
                        'CachePolicyId'] = cache_behavior.cache_policy_id
                else:
                    cb_forwarded_values_config = cache_behavior.forwarded_values
                    cb_forwarded_values_dict = {
                        'QueryString': str(cb_forwarded_values_config.query_string)
                    }
                    # Cookies
                    if cb_forwarded_values_config.cookies != None:
                        cb_forwarded_values_dict['Cookies'] = {'Forward': 'none'}
                        cb_forwarded_values_dict['Cookies'][
                            'Forward'] = cb_forwarded_values_config.cookies.forward
                        if len(cb_forwarded_values_config.cookies.whitelisted_names) > 0:
                            cb_forwarded_values_dict['Cookies'][
                                'WhitelistedNames'] = cb_forwarded_values_config.cookies.whitelisted_names
                    # Headers
                    if cloudfront_config.s3_origin_exists() == False:
                        cb_forwarded_values_dict[
                            'Headers'] = cache_behavior.forwarded_values.headers
                    cache_behavior_dict[
                        'ForwardedValues'] = cb_forwarded_values_dict
                cache_behaviors_list.append(cache_behavior_dict)
            distribution_config_dict['CacheBehaviors'] = cache_behaviors_list

        # Origin Access Identity
        if cloudfront_config.s3_origin_exists() == True:
            origin_id_res = troposphere.cloudfront.CloudFrontOriginAccessIdentity(
                title='CloudFrontOriginAccessIdentity',
                template=template,
                CloudFrontOriginAccessIdentityConfig=troposphere.cloudfront.CloudFrontOriginAccessIdentityConfig(
                    Comment=troposphere.Ref('AWS::StackName')))
            troposphere.Output(
                title='CloudFrontOriginAccessIdentity',
                template=template,
                Value=troposphere.Ref(origin_id_res))

        # Origins: either a paco S3Bucket ref or a custom domain name
        origins_list = []
        for origin_name, origin in cloudfront_config.origins.items():
            if origin.s3_bucket != None:
                domain_hash = utils.md5sum(str_data=origin.s3_bucket)
                origin_domain_name = self.paco_ctx.get_ref(origin.s3_bucket + '.url')
            else:
                domain_hash = utils.md5sum(str_data=origin.domain_name)
                origin_domain_name = origin.domain_name
            origin_dict = {'Id': origin_name, 'DomainName': origin_domain_name}
            if origin.s3_bucket == None:
                origin_dict['CustomOriginConfig'] = {
                    'OriginKeepaliveTimeout': origin.custom_origin_config.keepalive_timeout,
                    'OriginProtocolPolicy': origin.custom_origin_config.protocol_policy,
                    'OriginReadTimeout': origin.custom_origin_config.read_timeout,
                }
                if len(origin.custom_origin_config.ssl_protocols) > 0:
                    origin_dict['CustomOriginConfig'][
                        'OriginSSLProtocols'] = origin.custom_origin_config.ssl_protocols
                if origin.custom_origin_config.https_port != None:
                    origin_dict['CustomOriginConfig'][
                        'HTTPSPort'] = origin.custom_origin_config.https_port
                if origin.custom_origin_config.http_port:
                    origin_dict['CustomOriginConfig']['HTTPPort'] = str(
                        origin.custom_origin_config.http_port)
            else:
                s3_config = self.paco_ctx.get_ref(origin.s3_bucket)
                origin_dict['S3OriginConfig'] = {}
                if s3_config.cloudfront_origin == False:
                    origin_dict['S3OriginConfig']['OriginAccessIdentity'] = ''
                else:
                    origin_access_id_enabled = True
                    # NOTE(review): 'OriginAccessIdentiy' is a typo, but it is
                    # the CFN Parameter logical id — renaming it would alter
                    # generated templates, so it is intentionally left as-is.
                    param_name = "OriginAccessIdentiy" + domain_hash
                    access_id_ref = origin.s3_bucket + '.origin_id'
                    s3_cf_origin_id_param = self.create_cfn_parameter(
                        param_type='String',
                        name=param_name,
                        description='Origin Access Identity',
                        value=access_id_ref,
                    )
                    origin_dict['S3OriginConfig'][
                        'OriginAccessIdentity'] = troposphere.Sub(
                            'origin-access-identity/cloudfront/${OriginAccessId}',
                            {'OriginAccessId': troposphere.Ref(s3_cf_origin_id_param)})
            origins_list.append(origin_dict)
        distribution_config_dict['Origins'] = origins_list

        # Custom Error
        error_resp_list = []
        for error_resp in cloudfront_config.custom_error_responses:
            error_resp_dict = {
                'ErrorCachingMinTTL': error_resp.error_caching_min_ttl,
                'ErrorCode': error_resp.error_code,
                'ResponseCode': error_resp.response_code,
                'ResponsePagePath': error_resp.response_page_path
            }
            error_resp_list.append(error_resp_dict)
        if len(error_resp_list) > 0:
            distribution_config_dict['CustomErrorResponses'] = error_resp_list

        # Web ACL
        if cloudfront_config.webacl_id != None:
            webacl_id_value = cloudfront_config.webacl_id
            if is_ref(webacl_id_value):
                webacl_id_value = cloudfront_config.webacl_id + '.arn'
            webacl_id_param = self.create_cfn_parameter(
                param_type='String',
                name='WebAclId',
                description='WAF Web Acl Arn',
                value=webacl_id_value)
            distribution_config_dict['WebACLId'] = troposphere.Ref(
                webacl_id_param)

        distribution_dict = {'DistributionConfig': distribution_config_dict}
        distribution_res = troposphere.cloudfront.Distribution.from_dict(
            'Distribution', distribution_dict)
        template.add_resource(distribution_res)
        self.create_output(
            title='CloudFrontURL',
            value=troposphere.GetAtt('Distribution', 'DomainName'),
            ref=self.config_ref + '.domain_name')
        self.create_output(
            title='CloudFrontId',
            value=troposphere.Ref(distribution_res),
            ref=self.config_ref + '.id')

        # legacy path: Route53 record sets created inside this template
        if self.paco_ctx.legacy_flag('route53_record_set_2019_10_16') == True:
            if cloudfront_config.is_dns_enabled() == True:
                for alias in cloudfront_config.domain_aliases:
                    alias_hash = utils.md5sum(str_data=alias.domain_name)
                    zone_param_name = 'AliasHostedZoneId' + alias_hash
                    alias_zone_id_param = self.create_cfn_parameter(
                        param_type='String',
                        name=zone_param_name,
                        description='Domain Alias Hosted Zone Id',
                        value=alias.hosted_zone + '.id',
                    )
                    record_set_res = troposphere.route53.RecordSetType(
                        title=self.create_cfn_logical_id_join(
                            ['RecordSet', alias_hash]),
                        template=template,
                        HostedZoneId=troposphere.Ref(alias_zone_id_param),
                        Name=troposphere.Ref(
                            aliases_param_map[alias.domain_name]),
                        Type='A',
                        AliasTarget=troposphere.route53.AliasTarget(
                            DNSName=troposphere.GetAtt(distribution_res,
                                                       'DomainName'),
                            HostedZoneId='Z2FDTNDATAQYW2'))
                    record_set_res.DependsOn = distribution_res

    # stacks with an Origin Access Identity need to wait on delete
    if origin_access_id_enabled:
        self.stack.wait_for_delete = True

    # modern path: record sets are provisioned by the route53 controller
    if self.paco_ctx.legacy_flag('route53_record_set_2019_10_16') == False:
        route53_ctl = self.paco_ctx.get_controller('route53')
        if cloudfront_config.is_dns_enabled() == True:
            for alias in cloudfront_config.domain_aliases:
                account_ctx = self.account_ctx
                if alias.hosted_zone:
                    # the hosted zone may live in a different account
                    if is_ref(alias.hosted_zone):
                        hosted_zone = get_model_obj_from_ref(
                            alias.hosted_zone, self.paco_ctx.project)
                        account_ctx = self.paco_ctx.get_account_context(
                            account_ref=hosted_zone.account)
                    route53_ctl.add_record_set(
                        account_ctx,
                        self.aws_region,
                        cloudfront_config,
                        enabled=cloudfront_config.is_enabled(),
                        dns=alias,
                        record_set_type='Alias',
                        alias_dns_name='paco.ref ' + self.stack.stack_ref + '.domain_name',
                        alias_hosted_zone_id='Z2FDTNDATAQYW2',  # This is always the hosted zone ID when you create an alias record that routes traffic to a CloudFront distribution
                        stack_group=self.stack.stack_group,
                        async_stack_provision=True,
                        config_ref=config_ref + '.record_set')
def cloudtrail_template(self, cloudtrail):
    """Build the KMS Customer Managed Key (CMK) template for CloudTrail.

    Creates a KMS Key whose key policy:
      * grants key administration to the root of this stack's account,
      * lets the CloudTrail service describe the key and encrypt logs
        (restricted to the project's trail ARNs),
      * lets the configured IAM users decrypt CloudTrail logs.

    Registers '<resource>.kms.arn' and '<resource>.kms.id' stack outputs.

    :param cloudtrail: CloudTrail model object providing `kms_users`
        (usernames or paco refs) and `get_accounts()`.
    """
    # Troposphere Template Initialization
    # (fixed: removed extraneous f-string prefix — no placeholders)
    self.init_template('KMS Customer Managed Key (CMK) for CloudTrail')

    # Resolve kms_users to IAM user ARNs in the master account;
    # paco refs are resolved to the model user's username first.
    users = []
    for user in cloudtrail.kms_users:
        if is_ref(user):
            user_obj = get_model_obj_from_ref(user, self.paco_ctx.project)
            user = user_obj.username
        users.append(
            f"arn:aws:iam::{self.paco_ctx.project['accounts']['master'].account_id}:user/{user}"
        )

    # One trail ARN pattern per account CloudTrail is enabled in
    accounts = [
        f"arn:aws:cloudtrail:*:{account.account_id}:trail/*"
        for account in cloudtrail.get_accounts()
    ]

    cloudtrail_policy = PolicyDocument(
        Version='2012-10-17',
        Statement=[
            Statement(
                Sid="Allows admin of the key",
                Effect=Allow,
                Principal=Principal(
                    "AWS", [f'arn:aws:iam::{self.stack.account_ctx.id}:root']),
                Action=[
                    awacs.kms.CreateAlias,
                    awacs.kms.CreateCustomKeyStore,
                    awacs.kms.CreateGrant,
                    awacs.kms.CreateKey,
                    awacs.kms.DescribeCustomKeyStores,
                    awacs.kms.DescribeKey,
                    awacs.kms.EnableKey,
                    awacs.kms.EnableKeyRotation,
                    awacs.kms.ListAliases,
                    awacs.kms.ListGrants,
                    awacs.kms.ListKeyPolicies,
                    awacs.kms.ListKeys,
                    awacs.kms.ListResourceTags,
                    awacs.kms.ListRetirableGrants,
                    awacs.kms.PutKeyPolicy,
                    awacs.kms.UpdateAlias,
                    awacs.kms.UpdateCustomKeyStore,
                    awacs.kms.UpdateKeyDescription,
                    awacs.kms.RevokeGrant,
                    awacs.kms.DisableKey,
                    awacs.kms.DisableKeyRotation,
                    awacs.kms.GetKeyPolicy,
                    awacs.kms.GetKeyRotationStatus,
                    awacs.kms.GetParametersForImport,
                    awacs.kms.DeleteAlias,
                    awacs.kms.DeleteCustomKeyStore,
                    awacs.kms.DeleteImportedKeyMaterial,
                    awacs.kms.ScheduleKeyDeletion,
                    awacs.kms.CancelKeyDeletion,
                    awacs.kms.TagResource
                ],
                Resource=['*'],
            ),
            Statement(
                Sid="Allow CloudTrail access",
                Effect=Allow,
                Principal=Principal("Service", ['cloudtrail.amazonaws.com']),
                Action=[awacs.kms.DescribeKey],
                Resource=['*'],
            ),
            # Users may decrypt only ciphertext produced for CloudTrail
            # (the EncryptionContext must carry a cloudtrail ARN)
            Statement(
                Sid="Allow CloudTrail log decrypt permissions",
                Effect=Allow,
                Action=[awacs.kms.Decrypt],
                Principal=Principal("AWS", users),
                Resource=['*'],
                Condition=Condition([
                    Null({
                        "kms:EncryptionContext:aws:cloudtrail:arn": False
                    })
                ])),
            # CloudTrail may encrypt only for trails in the project's accounts
            Statement(
                Sid="Allow CloudTrail to encrypt logs",
                Effect=Allow,
                Principal=Principal("Service", ["cloudtrail.amazonaws.com"]),
                Action=[awacs.kms.GenerateDataKey],
                Resource=['*'],
                Condition=Condition([
                    StringLike({
                        "kms:EncryptionContext:aws:cloudtrail:arn": accounts
                    })
                ])),
        ])
    kms_dict = {
        'Description': 'CMK for CloudTrail',
        'EnableKeyRotation': True,
        'KeyPolicy': cloudtrail_policy,
    }
    kms_res = troposphere.kms.Key.from_dict('KMS', kms_dict)
    self.template.add_resource(kms_res)

    # Outputs
    self.create_output(
        title='CMKArn',
        description="The CMK Arn",
        value=troposphere.GetAtt(kms_res, 'Arn'),
        ref=self.resource.paco_ref_parts + ".kms.arn",
    )
    self.create_output(
        title='CMKId',
        description="The CMK Id",
        value=troposphere.Ref(kms_res),
        ref=self.resource.paco_ref_parts + ".kms.id",
    )
def __init__(
    self,
    stack,
    paco_ctx,
    grp_id=None,
    topics=None,
):
    """Build a CloudFormation template for SNS Topics and their Subscriptions.

    Used in two modes:
      * global resources: a group of SNSTopics (grp_id and/or topics given),
      * application resource: a single SNSTopic (grp_id and topics are None).

    Also emits a cross-account TopicPolicy for topics that enable
    cross_account_access, allowing Publish/Subscribe from all project accounts.
    """
    enabled_topics = False
    config = stack.resource
    # this template is used as both SNSTopics by global resources and a
    # single SNSTopic for an application resource.
    if topics == None:
        if grp_id == None:
            # single-topic mode: the stack's resource is the topic
            topics = [stack.resource]
            enabled_topics = stack.resource.is_enabled()
        else:
            # group mode: all topics in the resource container
            topics = config.values()
            for topic in topics:
                if topic.is_enabled():
                    enabled_topics = True
    else:
        # explicit topic list: enabled when non-empty
        if len(topics) > 0:
            enabled_topics = True

    super().__init__(
        stack,
        paco_ctx,
        enabled=enabled_topics,
    )
    if grp_id == None:
        self.set_aws_name('SNS', self.resource_group_name, self.resource_name)
    else:
        self.set_aws_name('SNS', grp_id)

    # Troposphere Template Initialization
    self.init_template('SNS Topics and Subscriptions')
    template = self.template

    # Topic Resources and Outputs
    topics_ref_cross_list = []
    for topic in topics:
        if not topic.is_enabled():
            continue
        topic_logical_id = self.create_cfn_logical_id(topic.name)

        # Do not specify a TopicName, as then updates cannot be performed that require
        # replacement of this resource.
        cfn_export_dict = {}
        if topic.display_name:
            cfn_export_dict['DisplayName'] = topic.display_name

        # Topic Resource
        topic_resource = troposphere.sns.Topic.from_dict(
            'Topic' + topic_logical_id,
            cfn_export_dict
        )
        if topic.cross_account_access:
            topics_ref_cross_list.append(troposphere.Ref(topic_resource))
        # cache the troposphere resource on the model object
        topic.topic_resource = topic_resource
        template.add_resource(topic_resource)

        # Subscriptions
        idx = 0
        for subscription in topic.subscriptions:
            sub_dict = {
                'TopicArn': troposphere.Ref(topic_resource)
            }
            if references.is_ref(subscription.endpoint):
                # paco ref endpoints are resolved through a CFN Parameter
                param_name = f'Endpoint{topic_logical_id}{idx}'
                parameter = self.create_cfn_parameter(
                    param_type = 'String',
                    name = param_name,
                    description = 'Subscription Endpoint',
                    value = subscription.endpoint,
                )
                # NOTE(review): this stores the Parameter object itself as the
                # Endpoint; every other call site wraps parameters in
                # troposphere.Ref(...) — confirm troposphere accepts a raw
                # Parameter here, otherwise this looks like a missing Ref.
                endpoint = parameter
            else:
                endpoint = subscription.endpoint
            sub_dict['Endpoint'] = endpoint
            sub_dict['Protocol'] = subscription.protocol
            if subscription.filter_policy:
                # filter_policy is stored as a JSON string in the model
                sub_dict['FilterPolicy'] = json.loads(subscription.filter_policy)
            subscription_logical_id = f"Subscription{topic_logical_id}{idx}"
            sub_resource = troposphere.sns.SubscriptionResource.from_dict(
                subscription_logical_id,
                sub_dict
            )
            template.add_resource(sub_resource)
            idx += 1

        # Topic Outputs
        if grp_id == None:
            output_ref = stack.resource.paco_ref_parts
        else:
            output_ref = '.'.join([stack.resource.paco_ref_parts, topic.name])
        self.create_output(
            title='SNSTopicArn' + topic_logical_id,
            value=troposphere.Ref(topic_resource),
            ref=f'{output_ref}.arn'
        )
        self.create_output(
            title='SNSTopicName' + topic_logical_id,
            value=troposphere.GetAtt(topic_resource, "TopicName"),
            ref=f'{output_ref}.name',
        )

    # Cross-account access policy
    if len(topics_ref_cross_list) > 0:
        account_id_list = [
            account.account_id for account in self.paco_ctx.project.accounts.values()
        ]
        topic_policy_resource = troposphere.sns.TopicPolicy(
            'TopicPolicyCrossAccountPacoProject',
            Topics = topics_ref_cross_list,
            PolicyDocument = Policy(
                Version = '2012-10-17',
                Id = "CrossAccountPublish",
                Statement=[
                    Statement(
                        Effect = Allow,
                        Principal = Principal("AWS", "*"),
                        Action = [
                            awacs.sns.Publish,
                            awacs.sns.Subscribe
                        ],
                        # NOTE(review): topic_resource is the leaked loop
                        # variable, so Resource lists only the LAST enabled
                        # topic even though Topics attaches the policy to all
                        # cross-account topics — confirm whether this should
                        # be topics_ref_cross_list instead.
                        Resource = [troposphere.Ref(topic_resource) ],
                        Condition = Condition(
                            StringEquals({
                                'AWS:SourceOwner': account_id_list,
                            })
                        )
                    )
                ]
            )
        )
        template.add_resource(topic_policy_resource)
def __init__(
    self,
    stack,
    paco_ctx,
):
    """Build a CloudFormation template for a Lambda Function.

    Creates the Function resource (handler/runtime/role/memory/timeout as
    Parameters), optional VPC config, Code from S3, inline ZipFile or a
    prepared artifact, environment variables, an optional auto-published
    Version, an optional SimpleDB cache domain, invoke Permissions for SNS
    topics and connected Paco resources (S3 notifications, EventsRules,
    IoT Analytics pipelines), LogGroups and a ManagedPolicy allowing the
    function role to write logs. Registers '.name' and '.arn' outputs.

    Fixes vs. previous revision: removed trailing commas that accidentally
    wrapped the ReservedConcurrentExecutions and Layers values in 1-tuples.
    """
    super().__init__(
        stack,
        paco_ctx,
        iam_capabilities=["CAPABILITY_NAMED_IAM"],
    )
    account_ctx = stack.account_ctx
    aws_region = stack.aws_region
    self.set_aws_name('Lambda', self.resource_group_name, self.resource_name)
    awslambda = self.awslambda = self.stack.resource
    self.init_template('Lambda Function')

    # if not enabled finish with only empty placeholder
    if not awslambda.is_enabled():
        return

    self.code_bucket_name = None

    # Parameters
    # NOTE: the EnableSDBCache Parameter is created for the template even
    # though the local variable is not referenced afterwards.
    sdb_cache_param = self.create_cfn_parameter(
        name='EnableSDBCache',
        param_type='String',
        description='Boolean indicating whether an SDB Domain will be created to be used as a cache.',
        value=awslambda.sdb_cache
    )
    function_description_param = self.create_cfn_parameter(
        name='FunctionDescription',
        param_type='String',
        description='A description of the Lamdba Function.',
        value=awslambda.description
    )
    handler_param = self.create_cfn_parameter(
        name='Handler',
        param_type='String',
        description='The name of the function to call upon execution.',
        value=awslambda.handler
    )
    runtime_param = self.create_cfn_parameter(
        name='Runtime',
        param_type='String',
        description='The name of the runtime language.',
        value=awslambda.runtime
    )
    role_arn_param = self.create_cfn_parameter(
        name='RoleArn',
        param_type='String',
        description='The execution role for the Lambda Function.',
        value=awslambda.iam_role.get_arn()
    )
    role_name_param = self.create_cfn_parameter(
        name='RoleName',
        param_type='String',
        description='The execution role name for the Lambda Function.',
        value=awslambda.iam_role.resolve_ref_obj.role_name
    )
    memory_size_param = self.create_cfn_parameter(
        name='MemorySize',
        param_type='Number',
        description="The amount of memory that your function has access to. Increasing the function's" + \
            " memory also increases its CPU allocation. The default value is 128 MB. The value must be a multiple of 64 MB.",
        value=awslambda.memory_size
    )
    reserved_conc_exec_param = self.create_cfn_parameter(
        name='ReservedConcurrentExecutions',
        param_type='Number',
        description='The number of simultaneous executions to reserve for the function.',
        value=awslambda.reserved_concurrent_executions
    )
    timeout_param = self.create_cfn_parameter(
        name='Timeout',
        param_type='Number',
        description='The amount of time that Lambda allows a function to run before stopping it. ',
        value=awslambda.timeout
    )
    layers_param = self.create_cfn_parameter(
        name='Layers',
        param_type='CommaDelimitedList',
        description='List of up to 5 Lambda Layer ARNs.',
        value=','.join(awslambda.layers)
    )

    # create the Lambda resource
    cfn_export_dict = {
        'Description': troposphere.Ref(function_description_param),
        'Handler': troposphere.Ref(handler_param),
        'MemorySize': troposphere.Ref(memory_size_param),
        'Runtime': troposphere.Ref(runtime_param),
        'Role': troposphere.Ref(role_arn_param),
        'Timeout': troposphere.Ref(timeout_param),
    }
    if awslambda.reserved_concurrent_executions:
        # fixed: a trailing comma here previously made this a 1-tuple,
        # which is not a valid Number value for CloudFormation
        cfn_export_dict['ReservedConcurrentExecutions'] = troposphere.Ref(reserved_conc_exec_param)
    if len(awslambda.layers) > 0:
        # fixed: trailing comma removed; the Ref of a CommaDelimitedList
        # Parameter already resolves to a list of Layer ARNs
        cfn_export_dict['Layers'] = troposphere.Ref(layers_param)

    # Lambda VPC
    if awslambda.vpc_config != None:
        vpc_security_group = self.create_cfn_ref_list_param(
            name='VpcSecurityGroupIdList',
            param_type='List<AWS::EC2::SecurityGroup::Id>',
            description='VPC Security Group Id List',
            value=awslambda.vpc_config.security_groups,
            ref_attribute='id',
        )
        # Segment SubnetList is a Segment stack Output based on availability zones
        segment_ref = awslambda.vpc_config.segments[0] + '.subnet_id_list'
        subnet_list_param = self.create_cfn_parameter(
            name='VpcSubnetIdList',
            param_type='List<AWS::EC2::Subnet::Id>',
            description='VPC Subnet Id List',
            value=segment_ref
        )
        cfn_export_dict['VpcConfig'] = {
            'SecurityGroupIds': troposphere.Ref(vpc_security_group),
            'SubnetIds': troposphere.Ref(subnet_list_param),
        }

    # Code object: S3 Bucket, inline ZipFile or deploy artifact?
    if awslambda.code.s3_bucket:
        if awslambda.code.s3_bucket.startswith('paco.ref '):
            value = awslambda.code.s3_bucket + ".name"
        else:
            value = awslambda.code.s3_bucket
        s3bucket_param = self.create_cfn_parameter(
            name='CodeS3Bucket',
            description="An Amazon S3 bucket in the same AWS Region as your function. The bucket can be in a different AWS account.",
            param_type='String',
            value=value
        )
        s3key_param = self.create_cfn_parameter(
            name='CodeS3Key',
            description="The Amazon S3 key of the deployment package.",
            param_type='String',
            value=awslambda.code.s3_key
        )
        cfn_export_dict['Code'] = {
            'S3Bucket': troposphere.Ref(s3bucket_param),
            'S3Key': troposphere.Ref(s3key_param),
        }
    else:
        zip_path = Path(awslambda.code.zipfile)
        if zip_path.is_file():
            if zipfile.is_zipfile(zip_path):
                # a pre-built .zip file: upload it as an artifact
                cfn_s3_code = self.prepare_s3bucket_artifact(is_zip=True)
                cfn_export_dict['Code'] = cfn_s3_code
            else:
                # Code is inline in CloudFormation template
                # ToDo: make this legacy or optional behaviour and create a Zip and upload to S3 Bucket
                cfn_export_dict['Code'] = {
                    'ZipFile': zip_path.read_text()
                }
        elif zip_path.is_dir():
            # a source directory: zip and upload as an artifact
            cfn_s3_code = self.prepare_s3bucket_artifact()
            cfn_export_dict['Code'] = cfn_s3_code

    # Environment variables
    var_export = {}
    if awslambda.environment != None and awslambda.environment.variables != None:
        for var in awslambda.environment.variables:
            # CFN Parameter logical ids cannot contain underscores
            name = var.key.replace('_','')
            env_param = self.create_cfn_parameter(
                name='EnvVar{}'.format(name),
                param_type='String',
                description='Env var for {}'.format(name),
                value=var.value,
            )
            var_export[var.key] = troposphere.Ref(env_param)
    if awslambda.sdb_cache == True:
        var_export['SDB_CACHE_DOMAIN'] = troposphere.Ref('LambdaSDBCacheDomain')
    if len(awslambda.log_group_names) > 0:
        # Add PACO_LOG_GROUPS Environment Variable
        paco_log_groups = [
            prefixed_name(awslambda, loggroup_name, self.paco_ctx.legacy_flag)
            for loggroup_name in awslambda.log_group_names
        ]
        paco_log_groups_param = self.create_cfn_parameter(
            name='EnvVariablePacoLogGroups',
            param_type='String',
            description='Env var for Paco Log Groups',
            value=','.join(paco_log_groups),
        )
        var_export['PACO_LOG_GROUPS'] = troposphere.Ref(paco_log_groups_param)
    cfn_export_dict['Environment'] = { 'Variables': var_export }

    # Lambda resource
    self.awslambda_resource = troposphere.awslambda.Function.from_dict(
        'Function',
        cfn_export_dict
    )
    self.template.add_resource(self.awslambda_resource)

    # Published Version (required for Lambda@Edge)
    if self.awslambda.edge != None and self.awslambda.edge.auto_publish_version != None:
        version_name = self.create_cfn_logical_id(f"AutoPublishedVersion{self.awslambda.edge.auto_publish_version}")
        version_resource = troposphere.awslambda.Version(
            title=version_name,
            FunctionName=troposphere.Ref(self.awslambda_resource),
            Description="AutoPublished by Paco",
        )
        self.template.add_resource(version_resource)
        self.create_output(
            title='AutoPublishedFunctionVersionArn',
            value=troposphere.Ref(version_resource),
            ref=awslambda.paco_ref_parts + '.autoversion.arn',
        )
        self.create_output(
            title='AutoPublishedFunctionVersion',
            value=troposphere.GetAtt(version_resource, "Version"),
            ref=awslambda.paco_ref_parts + '.autoversion.version',
        )

    # SDB Cache with SDB Domain and SDB Domain Policy resources
    if awslambda.sdb_cache == True:
        sdb_domain_resource = troposphere.sdb.Domain(
            title='LambdaSDBCacheDomain',
            template=self.template,
            Description="Lambda Function Domain"
        )
        sdb_policy = troposphere.iam.Policy(
            title='LambdaSDBCacheDomainPolicy',
            template=self.template,
            PolicyName='SDBDomain',
            PolicyDocument=Policy(
                Version='2012-10-17',
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[Action("sdb","*")],
                        Resource=[
                            troposphere.Sub(
                                'arn:aws:sdb:${AWS::Region}:${AWS::AccountId}:domain/${DomainName}',
                                DomainName=troposphere.Ref('LambdaSDBCacheDomain')
                            )
                        ],
                    )
                ],
                # NOTE(review): Roles is passed to the PolicyDocument and
                # references the role ARN, while the LogGroupManagedPolicy
                # below passes Roles=[Ref(role_name_param)] on the policy
                # resource — this placement and the ARN-vs-name mismatch look
                # wrong; flagged but left unchanged pending confirmation.
                Roles=troposphere.Ref(role_arn_param)
            )
        )
        sdb_policy.DependsOn = sdb_domain_resource
        self.awslambda_resource.DependsOn = sdb_domain_resource

    # Permissions
    # SNS Topic Lambda permissions and subscription
    idx = 1
    for sns_topic in awslambda.sns_topics:
        # paco refs resolve to the topic's '.arn' output
        if is_ref(sns_topic):
            sns_topic_value = sns_topic + '.arn'
        else:
            sns_topic_value = sns_topic

        # SNS Topic Arn parameters
        param_name = 'SNSTopicArn%d' % idx
        self.create_cfn_parameter(
            name=param_name,
            param_type='String',
            description='An SNS Topic ARN to grant permission to.',
            value=sns_topic_value
        )

        # Lambda permission
        troposphere.awslambda.Permission(
            title=param_name + 'Permission',
            template=self.template,
            Action="lambda:InvokeFunction",
            FunctionName=troposphere.GetAtt(self.awslambda_resource, 'Arn'),
            Principal='sns.amazonaws.com',
            SourceArn=troposphere.Ref(param_name),
        )
        idx += 1

    # Lambda permissions for connected Paco resources
    app = get_parent_by_interface(awslambda, schemas.IApplication)
    for obj in get_all_nodes(app):
        # S3 Bucket notification permission(s)
        if schemas.IS3Bucket.providedBy(obj):
            seen = {}
            if hasattr(obj, 'notifications'):
                if hasattr(obj.notifications, 'lambdas'):
                    for lambda_notif in obj.notifications.lambdas:
                        if lambda_notif.function == awslambda.paco_ref:
                            # yes, this Lambda gets notification from this S3Bucket
                            group = get_parent_by_interface(obj, schemas.IResourceGroup)
                            s3_logical_name = self.gen_cf_logical_name(group.name + obj.name, '_')
                            if s3_logical_name not in seen:
                                troposphere.awslambda.Permission(
                                    title='S3Bucket' + s3_logical_name,
                                    template=self.template,
                                    Action="lambda:InvokeFunction",
                                    FunctionName=troposphere.GetAtt(self.awslambda_resource, 'Arn'),
                                    Principal='s3.amazonaws.com',
                                    SourceArn='arn:aws:s3:::' + obj.get_bucket_name(),
                                )
                                seen[s3_logical_name] = True

        # Events Rule permission(s)
        if schemas.IEventsRule.providedBy(obj):
            seen = {}
            for target in obj.targets:
                # normalize both refs to the same account/region before comparing
                target_ref = Reference(target.target)
                target_ref.set_account_name(account_ctx.get_name())
                target_ref.set_region(aws_region)
                lambda_ref = Reference(awslambda.paco_ref)
                if target_ref.raw == lambda_ref.raw:
                    # yes, the Events Rule has a Target that is this Lambda
                    group = get_parent_by_interface(obj, schemas.IResourceGroup)
                    eventsrule_logical_name = self.gen_cf_logical_name(group.name + obj.name, '_')
                    if eventsrule_logical_name not in seen:
                        rule_name = create_event_rule_name(obj)
                        # rule_name = self.create_cfn_logical_id("EventsRule" + obj.paco_ref)
                        # rule_name = hash_smaller(rule_name, 64)
                        source_arn = 'arn:aws:events:{}:{}:rule/{}'.format(
                            aws_region,
                            account_ctx.id,
                            rule_name
                        )
                        troposphere.awslambda.Permission(
                            title='EventsRule' + eventsrule_logical_name,
                            template=self.template,
                            Action="lambda:InvokeFunction",
                            FunctionName=troposphere.GetAtt(self.awslambda_resource, 'Arn'),
                            Principal='events.amazonaws.com',
                            SourceArn=source_arn,
                        )
                        seen[eventsrule_logical_name] = True

        # IoT Analytics permission(s)
        if schemas.IIoTAnalyticsPipeline.providedBy(obj):
            seen = {}
            for activity in obj.pipeline_activities.values():
                if activity.activity_type == 'lambda':
                    target_ref = Reference(activity.function)
                    target_ref.set_account_name(account_ctx.get_name())
                    target_ref.set_region(aws_region)
                    lambda_ref = Reference(awslambda.paco_ref)
                    if target_ref.raw == lambda_ref.raw:
                        # yes, the IoT Analytics Lambda Activity has a ref to this Lambda
                        group = get_parent_by_interface(obj, schemas.IResourceGroup)
                        iotap_logical_name = self.gen_cf_logical_name(group.name + obj.name, '_')
                        if iotap_logical_name not in seen:
                            rule_name = create_event_rule_name(obj)
                            troposphere.awslambda.Permission(
                                title='IoTAnalyticsPipeline' + iotap_logical_name,
                                template=self.template,
                                Action="lambda:InvokeFunction",
                                FunctionName=troposphere.GetAtt(self.awslambda_resource, 'Arn'),
                                Principal='iotanalytics.amazonaws.com',
                            )
                            seen[iotap_logical_name] = True

    # Log group(s)
    # derive '/aws/lambda/<function-name>' from the function ARN at deploy time
    loggroup_function_name = troposphere.Join(
        '', [
            '/aws/lambda/',
            troposphere.Select(
                6, troposphere.Split(':', troposphere.GetAtt(self.awslambda_resource, 'Arn'))
            )
        ]
    )
    loggroup_resources = []
    loggroup_resources.append(
        self.add_log_group(loggroup_function_name, 'lambda')
    )
    if len(awslambda.log_group_names) > 0:
        # Additional App-specific LogGroups
        for loggroup_name in awslambda.log_group_names:
            # Add LogGroup to the template
            prefixed_loggroup_name = prefixed_name(awslambda, loggroup_name, self.paco_ctx.legacy_flag)
            loggroup_resources.append(
                self.add_log_group(prefixed_loggroup_name)
            )

    # LogGroup permissions
    log_group_arns = [
        troposphere.Join(':', [
            f'arn:aws:logs:{self.aws_region}:{account_ctx.id}:log-group',
            loggroup_function_name,
            '*'
        ])
    ]
    log_stream_arns = [
        troposphere.Join(':', [
            f'arn:aws:logs:{self.aws_region}:{account_ctx.id}:log-group',
            loggroup_function_name,
            'log-stream',
            '*'
        ])
    ]
    for loggroup_name in awslambda.log_group_names:
        prefixed_loggroup_name = prefixed_name(awslambda, loggroup_name, self.paco_ctx.legacy_flag)
        log_group_arns.append(
            f'arn:aws:logs:{self.aws_region}:{account_ctx.id}:log-group:{prefixed_loggroup_name}:*'
        )
        log_stream_arns.append(
            f'arn:aws:logs:{self.aws_region}:{account_ctx.id}:log-group:{prefixed_loggroup_name}:log-stream:*'
        )
    loggroup_policy_resource = troposphere.iam.ManagedPolicy(
        title='LogGroupManagedPolicy',
        PolicyDocument=Policy(
            Version='2012-10-17',
            Statement=[
                Statement(
                    Sid='AllowLambdaModifyLogStreams',
                    Effect=Allow,
                    Action=[
                        Action("logs","CreateLogStream"),
                        Action("logs","DescribeLogStreams"),
                    ],
                    Resource=log_group_arns,
                ),
                Statement(
                    Sid='AllowLambdaPutLogEvents',
                    Effect=Allow,
                    Action=[
                        Action("logs","PutLogEvents"),
                    ],
                    Resource=log_stream_arns,
                ),
            ],
        ),
        Roles=[troposphere.Ref(role_name_param)],
    )
    loggroup_policy_resource.DependsOn = loggroup_resources
    self.template.add_resource(loggroup_policy_resource)

    # Outputs
    self.create_output(
        title='FunctionName',
        value=troposphere.Ref(self.awslambda_resource),
        ref=awslambda.paco_ref_parts + '.name',
    )
    self.create_output(
        title='FunctionArn',
        value=troposphere.GetAtt(self.awslambda_resource, 'Arn'),
        ref=awslambda.paco_ref_parts + '.arn',
    )
def init_resource(self):
    """
    Provision a Lambda function resource.

    Creates (in order):
      1. An IAM execution Role for the function (either the configured
         ``iam_role`` or a generated default one).
      2. The Lambda CloudFormation Stack itself.
      3. One LambdaSNSSubscriptions Stack per region that the function's
         ``sns_topics`` live in (subscriptions must be made in the Topic's
         own region for cross-account/cross-region SNS to work).
    """
    # is this for Lambda@Edge?
    edge_enabled = False
    if self.resource.region != None:
        # Lambda resource pins its own region; override the controller default
        self.aws_region = self.resource.region
    if self.resource.edge != None and self.resource.edge.is_enabled():
        edge_enabled = True

    # Create function execution role.
    # If the user disabled their own iam_role config, fall back to a minimal
    # generated Role built from inline YAML.
    role_name = 'iam_role'
    if self.resource.iam_role and self.resource.iam_role.enabled == False:
        role_config_yaml = """
instance_profile: false
path: /
role_name: %s""" % ("LambdaFunction")
        # NOTE(review): presumably `yaml` here is paco's YAML wrapper; plain
        # PyYAML yaml.load() without an explicit Loader is deprecated — confirm.
        role_config_dict = yaml.load(role_config_yaml)
        role_config = models.iam.Role(role_name, self.resource)
        role_config.apply_config(role_config_dict)
    else:
        role_config = self.resource.iam_role

    # Note that CloudWatch LogGroup permissions are added in the Lambda stack
    # This is to allow CloudFormation to create the LogGroup to manage it's Retention policy
    # and to prevent the Lambda from being invoked and writing to the LogGroup before it's
    # created by CloudFormation and creating a LogGroup and causing a race condition in the stack.
    # Also, by setting the Policy after the Lambda it's possible to restrict the policy to just
    # the Lambda LogGroups and not leave it wide open like AWSLambdaBasicExecutionRole does.
    if self.resource.vpc_config != None:
        # VPC-attached Lambdas need ENI management permissions.
        # ToDo: Security: restrict resource
        vpc_config_policy = """
name: VPCAccess
statement:
  - effect: Allow
    action:
      - ec2:CreateNetworkInterface
      - ec2:DescribeNetworkInterfaces
      - ec2:DeleteNetworkInterface
    resource:
      - '*'
"""
        role_config.add_policy(yaml.load(vpc_config_policy))

    # The ID to give this role is: group.resource.iam_role
    iam_role_id = self.gen_iam_role_id(self.res_id, role_name)

    # If no assume policy has been added, force one here since we know its
    # a Lambda function using it.
    # Set defaults if assume role policy was not explicitly configured
    if not hasattr(role_config, 'assume_role_policy') or role_config.assume_role_policy == None:
        service = ['lambda.amazonaws.com']
        # allow Edge if it's enabled
        if edge_enabled:
            service.append('edgelambda.amazonaws.com')
        policy_dict = {
            'effect': 'Allow',
            'aws': [f"arn:aws:iam::{self.account_ctx.get_id()}:root"],
            'service': service
        }
        role_config.set_assume_role_policy(policy_dict)
    # Always turn off instance profiles for Lambda functions
    role_config.instance_profile = False
    role_config.enabled = self.resource.is_enabled()
    iam_ctl = self.paco_ctx.get_controller('IAM')
    iam_ctl.add_role(
        region=self.aws_region,
        resource=self.resource,
        role=role_config,
        iam_role_id=iam_role_id,
        stack_group=self.stack_group,
        stack_tags=self.stack_tags
    )
    # The Lambda function Stack itself
    self.stack = self.stack_group.add_new_stack(
        self.aws_region,
        self.resource,
        paco.cftemplates.Lambda,
        stack_tags=self.stack_tags
    )

    # Provision Lambda subscriptions in the same region as the SNS Topics
    # This is required for cross account + cross region lambda/sns
    region_topic_list = {}
    for topic in self.resource.sns_topics:
        if is_ref(topic):
            # NOTE(review): assumes the 5th dot-separated part of a
            # paco.ref SNS Topic reference is the region — confirm ref format.
            region_name = topic.split('.')[4]
        else:
            # Raw Topic ARN: arn:aws:sns:<region>:<account>:<name>
            region_name = topic.split(':')[3]
        if region_name not in vocabulary.aws_regions.keys():
            raise exception.InvalidAWSRegion(
                f'Invalid SNS Topic region in reference: {region_name}: {topic}'
            )
        # Group topics by region so one subscriptions stack is made per region
        if region_name not in region_topic_list.keys():
            region_topic_list[region_name] = []
        region_topic_list[region_name].append(topic)

    for region_name in region_topic_list.keys():
        topic_list = region_topic_list[region_name]
        self.stack = self.stack_group.add_new_stack(
            region_name,
            self.resource,
            paco.cftemplates.LambdaSNSSubscriptions,
            stack_tags=self.stack_tags,
            extra_context={'sns_topic_list': topic_list}
        )
def __init__(self, paco_ctx, account_ctx, aws_region, stack_group, stack_tags, env_ctx, app_id, grp_id,
             asg_id, asg_config, asg_config_ref, role_profile_arn, ec2_manager_user_data_script,
             ec2_manager_cache_id):
    """
    Build the CloudFormation template for an AutoScalingGroup.

    Emits, in order: CFN Parameters for AMI/security groups/keypair/user-data,
    a LaunchConfiguration, the AutoScalingGroup (with Tags for Name, EIP,
    EFS mounts and EBS volumes), an UpdatePolicy, scaling policies with their
    CloudWatch Alarms, and lifecycle hooks.
    """
    self.env_ctx = env_ctx
    self.ec2_manager_cache_id = ec2_manager_cache_id
    # The segment stack supplies the subnet refs the ASG launches into
    segment_stack = self.env_ctx.get_segment_stack(asg_config.segment)

    # Super Init:
    super().__init__(paco_ctx, account_ctx, aws_region,
                     enabled=asg_config.is_enabled(),
                     config_ref=asg_config_ref,
                     stack_group=stack_group,
                     stack_tags=stack_tags,
                     change_protected=asg_config.change_protected)
    self.set_aws_name('ASG', grp_id, asg_id)
    self.asg_config = asg_config

    # Troposphere
    self.init_template('AutoScalingGroup: ' + self.ec2_manager_cache_id)
    template = self.template

    # InstanceAMI Parameter is preserved in disabled templates so it can be smoothly disabled/enabled
    if self.asg_config.instance_ami_ignore_changes:
        ignore_changes = True
    else:
        ignore_changes = False
    instance_ami_param = self.create_cfn_parameter(
        param_type='String',
        name='InstanceAMI',
        description='The Amazon Machine Image Id to launch instances with.',
        value=asg_config.instance_ami,
        ignore_changes=ignore_changes,
    )

    # if the network for the ASG is disabled, only use an empty placeholder
    env_region = get_parent_by_interface(asg_config, schemas.IEnvironmentRegion)
    if not env_region.network.is_enabled():
        self.set_template(template.to_yaml())
        return

    security_group_list_param = self.create_cfn_ref_list_param(
        param_type='List<AWS::EC2::SecurityGroup::Id>',
        name='SecurityGroupList',
        description='List of security group ids to attach to the ASG instances.',
        value=asg_config.security_groups,
        ref_attribute='id',
    )
    instance_key_pair_param = self.create_cfn_parameter(
        param_type='String',
        name='InstanceKeyPair',
        description='The EC2 SSH KeyPair to assign each ASG instance.',
        value=asg_config.instance_key_pair + '.keypair_name',
    )

    # LaunchConfiguration properties, assembled as a plain dict first
    launch_config_dict = {
        'AssociatePublicIpAddress': asg_config.associate_public_ip_address,
        'EbsOptimized': asg_config.ebs_optimized,
        'ImageId': troposphere.Ref(instance_ami_param),
        'InstanceMonitoring': asg_config.instance_monitoring,
        'InstanceType': asg_config.instance_type,
        'KeyName': troposphere.Ref(instance_key_pair_param),
        'SecurityGroups': troposphere.Ref(security_group_list_param),
    }

    # BlockDeviceMappings
    if len(asg_config.block_device_mappings) > 0:
        mappings = []
        for bdm in asg_config.block_device_mappings:
            mappings.append(bdm.cfn_export_dict)
        launch_config_dict["BlockDeviceMappings"] = mappings

    # User data: EC2 manager script first, then the user's own script with its
    # shebang stripped so only one shebang remains.
    user_data_script = ''
    if ec2_manager_user_data_script != None:
        user_data_script += ec2_manager_user_data_script
    if asg_config.user_data_script != '':
        user_data_script += asg_config.user_data_script.replace('#!/bin/bash', '')
    if user_data_script != '':
        # Passed through a Parameter base64-encoded
        user_data_64 = base64.b64encode(user_data_script.encode('ascii'))
        user_data_script_param = self.create_cfn_parameter(
            param_type='String',
            name='UserDataScript',
            description='User data script to run at instance launch.',
            value=user_data_64.decode('ascii'),
        )
        launch_config_dict['UserData'] = troposphere.Ref(user_data_script_param)

    if role_profile_arn != None:
        launch_config_dict['IamInstanceProfile'] = role_profile_arn

    # CloudFormation Init
    if asg_config.cfn_init and asg_config.is_enabled():
        launch_config_dict['Metadata'] = troposphere.autoscaling.Metadata(
            asg_config.cfn_init.export_as_troposphere())
        # Each cfn-init parameter becomes a CFN Parameter named after its key
        for key, value in asg_config.cfn_init.parameters.items():
            if type(value) == type(str()):
                param_type = 'String'
            elif type(value) == type(int()) or type(value) == type(float()):
                param_type = 'Number'
            else:
                raise UnsupportedCloudFormationParameterType(
                    "Can not cast {} of type {} to a CloudFormation Parameter type."
                    .format(value, type(value)))
            cfn_init_param = self.create_cfn_parameter(
                param_type=param_type,
                name=key,
                description='CloudFormation Init Parameter {} for ASG {}'.format(key, asg_config.name),
                value=value,
            )

    # Launch Configuration resource
    launch_config_res = troposphere.autoscaling.LaunchConfiguration.from_dict(
        'LaunchConfiguration', launch_config_dict)
    template.add_resource(launch_config_res)

    # Subnet list ref: all AZ subnets, or a single AZ's subnet
    subnet_list_ref = 'paco.ref {}'.format(segment_stack.template.config_ref)
    if asg_config.availability_zone == 'all':
        subnet_list_ref += '.subnet_id_list'
    else:
        subnet_list_ref += '.az{}.subnet_id'.format(asg_config.availability_zone)
    asg_subnet_list_param = self.create_cfn_parameter(
        param_type='List<AWS::EC2::Subnet::Id>',
        name='ASGSubnetList',
        description='A list of subnets where the ASG will launch instances',
        value=subnet_list_ref)

    # A disabled ASG is kept in the template but scaled to zero
    min_instances = asg_config.min_instances if asg_config.is_enabled() else 0
    desired_capacity = asg_config.desired_capacity if asg_config.is_enabled() else 0
    desired_capacity_param = self.create_cfn_parameter(
        param_type='String',
        name='DesiredCapacity',
        description='The desired capacity of instances to run in the ASG.',
        value=desired_capacity,
        ignore_changes=self.asg_config.desired_capacity_ignore_changes,
    )

    asg_dict = {
        'AutoScalingGroupName': asg_config.get_aws_name(),
        'DesiredCapacity': troposphere.Ref(desired_capacity_param),
        'HealthCheckGracePeriod': asg_config.health_check_grace_period_secs,
        'LaunchConfigurationName': troposphere.Ref(launch_config_res),
        'MaxSize': asg_config.max_instances,
        'MinSize': min_instances,
        'Cooldown': asg_config.cooldown_secs,
        'HealthCheckType': asg_config.health_check_type,
        'TerminationPolicies': asg_config.termination_policies,
        'VPCZoneIdentifier': troposphere.Ref(asg_subnet_list_param),
    }

    # Classic load balancers (by name)
    if asg_config.load_balancers != None and len(asg_config.load_balancers) > 0:
        load_balancer_names_param = self.create_cfn_ref_list_param(
            param_type='List<String>',
            name='LoadBalancerNames',
            description='A list of load balancer names to attach to the ASG',
            value=asg_config.load_balancers,
        )
        asg_dict['LoadBalancerNames'] = troposphere.Ref(load_balancer_names_param)

    # ALB/NLB Target Groups (only when enabled)
    if asg_config.is_enabled():
        if asg_config.target_groups != None and len(asg_config.target_groups) > 0:
            asg_dict['TargetGroupARNs'] = []
            for target_group_arn in asg_config.target_groups:
                # md5 of the ref keeps the Parameter name unique per target group
                target_group_arn_param = self.create_cfn_parameter(
                    param_type='String',
                    name='TargetGroupARNs' + utils.md5sum(str_data=target_group_arn),
                    description='A Target Group ARNs to attach to the ASG',
                    value=target_group_arn + '.arn',
                )
                asg_dict['TargetGroupARNs'].append(troposphere.Ref(target_group_arn_param))

    if asg_config.monitoring != None and \
            asg_config.monitoring.is_enabled() == True and \
            len(asg_config.monitoring.asg_metrics) > 0:
        asg_dict['MetricsCollection'] = [{
            'Granularity': '1Minute',
            'Metrics': asg_config.monitoring.asg_metrics
        }]

    # ASG Tags
    asg_dict['Tags'] = [
        troposphere.autoscaling.Tag('Name', asg_dict['AutoScalingGroupName'], True)
    ]

    # EIP: attached via a Tag read by instance tooling rather than a CFN association
    if asg_config.eip != None and asg_config.is_enabled():
        if references.is_ref(asg_config.eip) == True:
            eip_value = asg_config.eip + '.allocation_id'
        else:
            eip_value = asg_config.eip
        eip_id_param = self.create_cfn_parameter(
            param_type='String',
            name='EIPAllocationId',
            description='The allocation Id of the EIP to attach to the instance.',
            value=eip_value,
        )
        asg_dict['Tags'].append(
            troposphere.autoscaling.Tag('Paco-EIP-Allocation-Id',
                                        troposphere.Ref(eip_id_param), True))

    # EFS FileSystemId Tags
    if asg_config.is_enabled():
        for efs_mount in asg_config.efs_mounts:
            target_hash = utils.md5sum(str_data=efs_mount.target)
            if references.is_ref(efs_mount.target) == True:
                efs_value = efs_mount.target + '.id'
            else:
                efs_value = efs_mount.target
            efs_id_param = self.create_cfn_parameter(
                param_type='String',
                name='EFSId' + target_hash,
                description='EFS Id',
                value=efs_value,
            )
            asg_tag = troposphere.autoscaling.Tag(
                'efs-id-' + target_hash, troposphere.Ref(efs_id_param), True)
            asg_dict['Tags'].append(asg_tag)

        # EBS Volume Id and Device name Tags
        for ebs_volume_mount in asg_config.ebs_volume_mounts:
            if ebs_volume_mount.is_enabled() == False:
                continue
            volume_hash = utils.md5sum(str_data=ebs_volume_mount.volume)
            if references.is_ref(ebs_volume_mount.volume) == True:
                ebs_volume_id_value = ebs_volume_mount.volume + '.id'
            else:
                ebs_volume_id_value = ebs_volume_mount.volume
            # Volume Id
            ebs_volume_id_param = self.create_cfn_parameter(
                param_type='String',
                name='EBSVolumeId' + volume_hash,
                description='EBS Volume Id',
                value=ebs_volume_id_value)
            ebs_volume_id_tag = troposphere.autoscaling.Tag(
                'ebs-volume-id-' + volume_hash,
                troposphere.Ref(ebs_volume_id_param), True)
            asg_dict['Tags'].append(ebs_volume_id_tag)
            #ebs_device_param = self.create_cfn_parameter(
            #    param_type='String',
            #    name='EBSDevice'+volume_hash,
            #    description='EBS Device Name',
            #    value=ebs_volume_mount.device,
            #)
            #ebs_device_tag = troposphere.autoscaling.Tag(
            #    'ebs-device-' + volume_hash,
            #    troposphere.Ref(ebs_device_param),
            #    True
            #)
            #asg_dict['Tags'].append(ebs_device_tag)

    asg_res = troposphere.autoscaling.AutoScalingGroup.from_dict('ASG', asg_dict)
    template.add_resource(asg_res)
    asg_res.DependsOn = launch_config_res

    # UpdatePolicy defaults: replace everything at once with no pause
    max_batch_size = 1
    min_instances_in_service = 0
    pause_time = 'PT0S'
    wait_on_resource_signals = False
    if asg_config.is_enabled() == True:
        if asg_config.rolling_update_policy != None:
            if asg_config.rolling_update_policy.is_enabled():
                max_batch_size = asg_config.rolling_update_policy.max_batch_size
                min_instances_in_service = asg_config.rolling_update_policy.min_instances_in_service
                pause_time = asg_config.rolling_update_policy.pause_time
                wait_on_resource_signals = asg_config.rolling_update_policy.wait_on_resource_signals
        else:
            # legacy flat settings used when no rolling_update_policy block exists
            max_batch_size = asg_config.update_policy_max_batch_size
            min_instances_in_service = asg_config.update_policy_min_instances_in_service

    asg_res.UpdatePolicy = troposphere.policies.UpdatePolicy(
        AutoScalingRollingUpdate=troposphere.policies.AutoScalingRollingUpdate(
            MaxBatchSize=max_batch_size,
            MinInstancesInService=min_instances_in_service,
            PauseTime=pause_time,
            WaitOnResourceSignals=wait_on_resource_signals))

    self.create_output(title='ASGName',
                       value=troposphere.Ref(asg_res),
                       description='Auto Scaling Group Name',
                       ref=[asg_config_ref, asg_config_ref + '.name'])

    # CPU Scaling Policy
    if asg_config.scaling_policy_cpu_average > 0:
        troposphere.autoscaling.ScalingPolicy(
            title='CPUAverageScalingPolicy',
            template=template,
            AutoScalingGroupName=troposphere.Ref(asg_res),
            PolicyType='TargetTrackingScaling',
            TargetTrackingConfiguration=troposphere.autoscaling.TargetTrackingConfiguration(
                PredefinedMetricSpecification=troposphere.autoscaling.PredefinedMetricSpecification(
                    PredefinedMetricType='ASGAverageCPUUtilization'),
                TargetValue=float(asg_config.scaling_policy_cpu_average)))

    # Simple/step scaling policies, each driven by its own CloudWatch Alarms
    if asg_config.scaling_policies != None:
        for scaling_policy_name in asg_config.scaling_policies.keys():
            scaling_policy = asg_config.scaling_policies[scaling_policy_name]
            if scaling_policy.is_enabled() == False:
                continue
            scaling_policy_res = troposphere.autoscaling.ScalingPolicy(
                title=self.create_cfn_logical_id_join(
                    ['ScalingPolicy', scaling_policy_name], camel_case=True),
                template=template,
                AdjustmentType=scaling_policy.adjustment_type,
                AutoScalingGroupName=troposphere.Ref(asg_res),
                PolicyType=scaling_policy.policy_type,
                ScalingAdjustment=scaling_policy.scaling_adjustment,
                Cooldown=scaling_policy.cooldown)
            alarm_idx = 0
            for alarm in scaling_policy.alarms:
                dimension_list = []
                for dimension in alarm.dimensions:
                    dimension_value = dimension.value
                    if dimension.name == 'AutoScalingGroupName' and references.is_ref(dimension.value):
                        # Reference the local ASG if the ref points here
                        dimension_ref = Reference(dimension.value)
                        if dimension_ref.ref == self.config_ref:
                            dimension_value = troposphere.Ref(asg_res)
                    dimension_res = troposphere.cloudwatch.MetricDimension(
                        Name=dimension.name, Value=dimension_value)
                    dimension_list.append(dimension_res)
                if len(dimension_list) == 0:
                    dimension_list = troposphere.Ref('AWS::NoValue')
                # Alarm Resource
                troposphere.cloudwatch.Alarm(
                    title=self.create_cfn_logical_id_join(
                        ['ScalingPolicyAlarm', scaling_policy_name, str(alarm_idx)],
                        camel_case=True),
                    template=template,
                    ActionsEnabled=True,
                    AlarmActions=[troposphere.Ref(scaling_policy_res)],
                    AlarmDescription=alarm.alarm_description,
                    ComparisonOperator=alarm.comparison_operator,
                    MetricName=alarm.metric_name,
                    Namespace=alarm.namespace,
                    Period=alarm.period,
                    Threshold=alarm.threshold,
                    EvaluationPeriods=alarm.evaluation_periods,
                    Statistic=alarm.statistic,
                    Dimensions=dimension_list)
                alarm_idx += 1

    # Lifecycle hooks
    if asg_config.lifecycle_hooks != None:
        for lifecycle_hook_name in asg_config.lifecycle_hooks:
            lifecycle_hook = asg_config.lifecycle_hooks[lifecycle_hook_name]
            if lifecycle_hook.is_enabled() == False:
                continue
            troposphere.autoscaling.LifecycleHook(
                title=self.create_cfn_logical_id_join(
                    ['LifecycleHook', lifecycle_hook_name], camel_case=True),
                template=template,
                AutoScalingGroupName=troposphere.Ref(asg_res),
                DefaultResult=lifecycle_hook.default_result,
                LifecycleTransition=lifecycle_hook.lifecycle_transition,
                RoleARN=lifecycle_hook.role_arn,
                NotificationTargetARN=lifecycle_hook.notification_target_arn)

    self.set_template()
def __init__(self, paco_ctx, account_ctx, aws_region, stack_group, stack_tags, grp_id, res_id,
             config, res_config_ref):
    """
    Build the CloudFormation template for a group of SNS Topics (legacy-style
    template constructor).

    Emits one Topic resource (with inline Subscriptions) per enabled topic,
    Arn/Name outputs for each, and — when any topic enables
    cross_account_access — a single TopicPolicy granting sns:Publish to all
    accounts in the paco project.
    """
    # Enabled if at least one topic in the group is enabled
    enabled_topics = False
    for topic in config:
        if topic.is_enabled():
            enabled_topics = True

    super().__init__(
        paco_ctx,
        account_ctx,
        aws_region,
        config_ref=res_config_ref,
        stack_group=stack_group,
        stack_tags=stack_tags,
        enabled=enabled_topics,
    )
    self.set_aws_name('SNSTopics', grp_id, res_id)
    self.config = config

    # Troposphere Template Initialization
    self.init_template('SNS Topics')
    template = self.template

    # Topic Resources and Outputs
    any_topic_enabled = False
    topics_ref_cross_list = []
    for topic in self.config:
        if not topic.is_enabled():
            continue
        any_topic_enabled = True
        topic_logical_id = self.create_cfn_logical_id(topic.name)

        # Do not specify a TopicName, as then updates cannot be performed that require
        # replacement of this resource.
        cfn_export_dict = {}
        if topic.display_name:
            cfn_export_dict['DisplayName'] = topic.display_name

        # Subscriptions
        if len(topic.subscriptions) > 0:
            cfn_export_dict['Subscription'] = []
            for subscription in topic.subscriptions:
                sub_dict = {}
                if references.is_ref(subscription.endpoint):
                    # Ref endpoints are passed in via a CFN Parameter
                    # NOTE(review): param name is derived from the topic only —
                    # two ref-endpoint subscriptions on the same topic would
                    # produce the same Parameter name; confirm whether
                    # create_cfn_parameter dedups or this is a latent collision.
                    param_name = 'Endpoint{}'.format(topic_logical_id)
                    parameter = self.create_cfn_parameter(
                        param_type='String',
                        name=param_name,
                        description='SNSTopic Endpoint value',
                        value=subscription.endpoint,
                    )
                    endpoint = parameter
                else:
                    endpoint = subscription.endpoint
                sub_dict['Endpoint'] = endpoint
                sub_dict['Protocol'] = subscription.protocol
                cfn_export_dict['Subscription'].append(sub_dict)

        topic_resource = troposphere.sns.Topic.from_dict(
            'Topic' + topic_logical_id, cfn_export_dict)
        if topic.cross_account_access:
            # Collected so the cross-account TopicPolicy below covers this topic
            topics_ref_cross_list.append(troposphere.Ref(topic_resource))
        # Stash the troposphere resource on the model object for later callers
        topic.topic_resource = topic_resource
        template.add_resource(topic_resource)

        # Topic Outputs
        output_ref = '.'.join([res_config_ref, topic.name])
        self.create_output(title='SNSTopicArn' + topic_logical_id,
                           value=troposphere.Ref(topic_resource),
                           ref=output_ref + '.arn')
        self.create_output(
            title='SNSTopicName' + topic_logical_id,
            value=troposphere.GetAtt(topic_resource, "TopicName"),
            ref=output_ref + '.name',
        )

    # Cross-account access policy: allow any principal whose account owns the
    # call (AWS:SourceOwner) and is part of this paco project to Publish.
    if len(topics_ref_cross_list) > 0:
        account_id_list = [
            account.account_id for account in self.paco_ctx.project.accounts.values()
        ]
        topic_policy_resource = troposphere.sns.TopicPolicy(
            'TopicPolicyCrossAccountPacoProject',
            Topics=topics_ref_cross_list,
            PolicyDocument=Policy(
                Version='2012-10-17',
                Id="CrossAccountPublish",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Principal=Principal("AWS", "*"),
                        Action=[awacs.sns.Publish],
                        Resource=topics_ref_cross_list,
                        Condition=Condition(
                            StringEquals({
                                'AWS:SourceOwner': account_id_list,
                            })))
                ]))
        template.add_resource(topic_policy_resource)

    self.enabled = any_topic_enabled
    # Generate the Template
    self.set_template()
def __init__(self, stack, paco_ctx):
    """
    Build the CloudFormation template for a CloudWatch/EventBridge Events Rule.

    Creates CFN Parameters for the schedule/description and for each target
    ARN, an invocation IAM Role when a target (CodeBuild project or Lambda)
    needs one, the AWS::Events::Rule resource, and Id/Arn outputs.
    Monitoring notifications are folded into the rule's target list as extra
    SNS targets.
    """
    super().__init__(
        stack,
        paco_ctx,
        iam_capabilities=["CAPABILITY_NAMED_IAM"],
    )
    eventsrule = stack.resource
    config_ref = eventsrule.paco_ref_parts
    self.set_aws_name('EventsRule', self.resource_group_name, self.resource_name)
    self.notification_groups = {}

    # Init a Troposphere template
    self.init_template('CloudWatch EventsRule')

    if eventsrule.is_enabled() == False:
        return

    # Parameters
    schedule_expression_param = None
    if eventsrule.schedule_expression:
        schedule_expression_param = self.create_cfn_parameter(
            param_type='String',
            name='ScheduleExpression',
            description='ScheduleExpression for the Event Rule.',
            value=eventsrule.schedule_expression,
        )
    description_param = self.create_cfn_parameter(
        param_type='String',
        name='EventDescription',
        description='Description for the Event Rule.',
        value=eventsrule.description,
    )

    # Monitoring Target: resource-level notifications win; otherwise fall back
    # to the application's notifications.
    monitoring = self.resource.monitoring
    if monitoring != None and monitoring.is_enabled() == True:
        notifications = None
        if monitoring.notifications != None and len(monitoring.notifications.keys()) > 0:
            notifications = monitoring.notifications
        else:
            app_config = get_parent_by_interface(self.resource, schemas.IApplication)
            notifications = app_config.notifications
        if notifications != None and len(notifications.keys()) > 0:
            # Create the CF Param for the SNS ARN we need to Publish to
            notify_param_cache = []
            for notify_group_name in notifications.keys():
                for sns_group_name in notifications[notify_group_name].groups:
                    notify_param = self.create_notification_param(sns_group_name)
                    # Only append if the are unique
                    if notify_param not in notify_param_cache:
                        # NOTE: mutates the model's target list — notification
                        # params ride along as extra rule targets.
                        eventsrule.targets.append(notify_param)
                        notify_param_cache.append(notify_param)

    # Targets
    targets = []
    self.target_params = {}
    target_invocation_role_resource = None
    for index in range(0, len(eventsrule.targets)):
        target = eventsrule.targets[index]
        # Target Parameters
        target_name = 'Target{}'.format(index)

        # Target CFN Parameters
        # Check if we already have a parameter object (a notification param
        # appended above); otherwise the target is a model object with a ref.
        target_policy_actions = None
        if isinstance(target, troposphere.Parameter):
            self.target_params[target_name + 'Arn'] = target
        else:
            self.target_params[target_name + 'Arn'] = self.create_cfn_parameter(
                param_type='String',
                name=target_name + 'Arn',
                description=target_name + ' Arn for the Events Rule.',
                value=target.target + '.arn',
            )
            # If the target is a reference, get the target object from the model
            # to check what type of resource we need to configure for
            target_ref = Reference(target.target)
            if target_ref.parts[-1] == 'project' and target_ref.parts[-3] == 'build':
                # Strip the trailing '.project' to resolve the CodeBuild action
                codebuild_target_ref = f'paco.ref {".".join(target_ref.parts[:-1])}'
                target_model_obj = get_model_obj_from_ref(
                    codebuild_target_ref, self.paco_ctx.project)
            else:
                target_model_obj = get_model_obj_from_ref(
                    target.target, self.paco_ctx.project)

            # Lambda Policy Actions
            if schemas.IDeploymentPipelineBuildCodeBuild.providedBy(target_model_obj):
                # CodeBuild Project
                target_policy_actions = [awacs.codebuild.StartBuild]
            elif schemas.ILambda.providedBy(target_model_obj):
                # Lambda Function
                target_policy_actions = [awacs.awslambda.InvokeFunction]

        # Target Id parameter (the literal 'TargetN' string is used as the Id)
        self.target_params[target_name] = self.create_cfn_parameter(
            param_type='String',
            name=target_name,
            description=target_name + ' for the Event Rule.',
            value=target_name,
        )

        # IAM Role Polcies by Resource type
        if target_policy_actions != None:
            # IAM Role Resources to allow Event to invoke Target
            # NOTE(review): the logical id 'TargetInvocationRole' is fixed —
            # two targets that both need a role would collide; confirm only a
            # single role-requiring target is expected per rule.
            target_invocation_role_resource = troposphere.iam.Role(
                'TargetInvocationRole',
                AssumeRolePolicyDocument=Policy(
                    Version='2012-10-17',
                    Statement=[
                        Statement(Effect=Allow,
                                  Action=[awacs.sts.AssumeRole],
                                  Principal=Principal('Service', ['events.amazonaws.com']))
                    ],
                ),
                Policies=[
                    troposphere.iam.Policy(
                        PolicyName="TargetInvocation",
                        PolicyDocument=Policy(
                            Version='2012-10-17',
                            Statement=[
                                Statement(
                                    Effect=Allow,
                                    Action=target_policy_actions,
                                    Resource=[
                                        troposphere.Ref(self.target_params[target_name + 'Arn'])
                                    ],
                                )
                            ]))
                ],
            )
            self.template.add_resource(target_invocation_role_resource)

        # Create Target CFN Resources
        cfn_export_dict = {
            'Arn': troposphere.Ref(self.target_params[target_name + 'Arn']),
            'Id': troposphere.Ref(self.target_params[target_name])
        }
        if target_invocation_role_resource != None:
            cfn_export_dict['RoleArn'] = troposphere.GetAtt(
                target_invocation_role_resource, 'Arn')
        if hasattr(target, 'input_json') and target.input_json != None:
            cfn_export_dict['Input'] = target.input_json

        # Events Rule Targets
        targets.append(cfn_export_dict)

    # Events Rule Resource
    # The Name is needed so that a Lambda can be created and it's Lambda ARN output
    # can be supplied as a Parameter to this Stack and a Lambda Permission can be
    # made with the Lambda. Avoids circular dependencies.
    name = create_event_rule_name(eventsrule)
    if eventsrule.enabled_state:
        enabled_state = 'ENABLED'
    else:
        enabled_state = 'DISABLED'

    events_rule_dict = {
        'Name': name,
        'Description': troposphere.Ref(description_param),
        'Targets': targets,
        'State': enabled_state
    }
    if target_invocation_role_resource != None:
        events_rule_dict['RoleArn'] = troposphere.GetAtt(
            target_invocation_role_resource, 'Arn')

    if schedule_expression_param != None:
        events_rule_dict['ScheduleExpression'] = troposphere.Ref(schedule_expression_param)
    elif eventsrule.event_pattern != None:
        # Build the EventPattern: refs to CodeBuild projects become
        # 'aws.codebuild' sources filtered on project-name.
        source_value_list = []
        project_name_list = []
        for pattern_source in eventsrule.event_pattern.source:
            if is_ref(pattern_source):
                source_obj = get_model_obj_from_ref(pattern_source, self.paco_ctx.project)
                if schemas.IDeploymentPipelineBuildCodeBuild.providedBy(source_obj):
                    source_value_list.append('aws.codebuild')
                    project_name_list.append(
                        source_obj._stack.template.get_project_name())
                else:
                    raise InvalidEventsRuleEventPatternSource(pattern_source)
            else:
                source_value_list.append(pattern_source)
        if len(project_name_list) > 0:
            eventsrule.event_pattern.detail['project-name'] = project_name_list
        event_pattern_dict = {
            'source': source_value_list,
            'detail-type': utils.obj_to_dict(eventsrule.event_pattern.detail_type),
            'detail': utils.obj_to_dict(eventsrule.event_pattern.detail),
        }
        # Round-trip through YAML to normalize model objects into plain data
        event_pattern_yaml = yaml.dump(event_pattern_dict)
        events_rule_dict['EventPattern'] = yaml.load(event_pattern_yaml)
    else:
        # Defaults to a CodePipeline events rule
        event_pattern_yaml = """
source:
  - aws.codepipeline
detail-type:
  - 'CodePipeline Pipeline Execution State Change'
detail:
  state:
    - STARTED
"""
        events_rule_dict['EventPattern'] = yaml.load(event_pattern_yaml)

    event_rule_resource = troposphere.events.Rule.from_dict('EventRule', events_rule_dict)
    if target_invocation_role_resource != None:
        event_rule_resource.DependsOn = target_invocation_role_resource
    self.template.add_resource(event_rule_resource)

    # Outputs
    self.create_output(
        title="EventRuleId",
        value=troposphere.Ref(event_rule_resource),
        ref=config_ref + '.id',
    )
    self.create_output(
        title="EventRuleArn",
        value=troposphere.GetAtt(event_rule_resource, "Arn"),
        ref=config_ref + '.arn',
    )
def __init__(
    self,
    stack,
    paco_ctx,
    grp_id=None,
    topics=None,
):
    """
    Build the CloudFormation template for SNS Topic(s) (new-style stack/paco_ctx
    constructor).

    Used three ways:
      - topics supplied explicitly by the caller,
      - grp_id == None: the stack's resource IS a single SNSTopic,
      - grp_id given: the stack's resource is a container of topics.

    Emits one Topic per enabled topic, an optional per-topic TopicPolicy
    (CodeStar notifications / CloudWatch / cross-account publish), and
    Arn/Name outputs.
    """
    enabled_topics = False
    config = stack.resource
    # this template is used as both SNSTopics by global resources and a
    # single SNSTopic for an application resource.
    if topics == None:
        if grp_id == None:
            # single-topic application resource
            topics = [stack.resource]
            enabled_topics = stack.resource.is_enabled()
        else:
            # container of topics
            topics = config.values()
            for topic in topics:
                if topic.is_enabled():
                    enabled_topics = True
    else:
        if len(topics) > 0:
            enabled_topics = True

    super().__init__(
        stack,
        paco_ctx,
        enabled=enabled_topics,
    )
    if grp_id == None:
        self.set_aws_name('SNSTopics', self.resource_group_name, self.resource_name)
    else:
        self.set_aws_name('SNSTopics', grp_id)

    # Troposphere Template Initialization
    self.init_template('SNS Topics')
    template = self.template

    # Topic Resources and Outputs
    topics_ref_cross_list = []
    # Dedup cache for cross-account policy statements (see NOTE below)
    topic_policy_cache = []
    for topic in topics:
        if not topic.is_enabled():
            continue
        statement_list = []
        topic_logical_id = self.create_cfn_logical_id(topic.name)

        # Do not specify a TopicName, as then updates cannot be performed that require
        # replacement of this resource.
        cfn_export_dict = {}
        if topic.display_name:
            cfn_export_dict['DisplayName'] = topic.display_name

        # Subscriptions
        if len(topic.subscriptions) > 0:
            cfn_export_dict['Subscription'] = []
            for subscription in topic.subscriptions:
                sub_dict = {}
                if references.is_ref(subscription.endpoint):
                    # Ref endpoints are passed in via a CFN Parameter
                    # NOTE(review): param name depends only on the topic; two
                    # ref-endpoint subscriptions on one topic would share a
                    # name — confirm create_cfn_parameter semantics.
                    param_name = 'Endpoint{}'.format(topic_logical_id)
                    parameter = self.create_cfn_parameter(
                        param_type='String',
                        name=param_name,
                        description='SNSTopic Endpoint value',
                        value=subscription.endpoint,
                    )
                    endpoint = parameter
                else:
                    endpoint = subscription.endpoint
                sub_dict['Endpoint'] = endpoint
                sub_dict['Protocol'] = subscription.protocol
                cfn_export_dict['Subscription'].append(sub_dict)

        topic_resource = troposphere.sns.Topic.from_dict(
            'Topic' + topic_logical_id, cfn_export_dict)
        # Stash the troposphere resource on the model object for later callers
        topic.topic_resource = topic_resource
        template.add_resource(topic_resource)

        if topic.codestar_notification_access:
            statement = Statement(
                Effect=Allow,
                Sid='CodeStarNotificationAccess',
                Principal=Principal("Service", 'codestar-notifications.amazonaws.com'),
                Action=[awacs.sns.Publish],
                Resource=[troposphere.Ref(topic_resource)],
            )
            statement_list.append(statement)
            # Add CloudWatch service
            # NOTE(review): reconstructed as nested under
            # codestar_notification_access — confirm it is not meant to apply
            # to every topic unconditionally.
            statement = Statement(
                Effect=Allow,
                Sid='CloudWatchService',
                Principal=Principal("Service", 'cloudwatch.amazonaws.com'),
                Action=[awacs.sns.Publish],
                Resource=[troposphere.Ref(topic_resource)],
            )
            statement_list.append(statement)

        if topic.cross_account_access:
            account_id_list = [
                account.account_id for account in self.paco_ctx.project.accounts.values()
            ]
            for account_id in account_id_list:
                # NOTE(review): topic_policy_cache is shared across ALL topics,
                # so a second cross-account topic gets no statements for
                # already-seen account ids — looks unintended; verify.
                if account_id in topic_policy_cache:
                    continue
                topic_policy_cache.append(account_id)
                statement = Statement(
                    Effect=Allow,
                    Sid=self.create_cfn_logical_id_join(account_id),
                    Principal=Principal("AWS", f'arn:aws:iam::{account_id}:root'),
                    Action=[awacs.sns.Publish, awacs.sns.Subscribe],
                    Resource=[troposphere.Ref(topic_resource)],
                )
                statement_list.append(statement)

        if len(statement_list) > 0:
            topic_policy_resource = troposphere.sns.TopicPolicy(
                f'Paco{topic_logical_id}TopicPolicy',
                Topics=[troposphere.Ref(topic_resource)],
                PolicyDocument=Policy(
                    Version='2012-10-17',
                    Id="PacoSNSTopicPolicy",
                    Statement=statement_list))
            template.add_resource(topic_policy_resource)

        # Topic Outputs
        if grp_id == None:
            output_ref = stack.resource.paco_ref_parts
        else:
            output_ref = '.'.join([stack.resource.paco_ref_parts, topic.name])
        self.create_output(title='SNSTopicArn' + topic_logical_id,
                           value=troposphere.Ref(topic_resource),
                           ref=output_ref + '.arn')
        self.create_output(
            title='SNSTopicName' + topic_logical_id,
            value=troposphere.GetAtt(topic_resource, "TopicName"),
            ref=output_ref + '.name',
        )
def __init__(self, stack, paco_ctx, task_execution_role):
    """Build the CloudFormation template for ECS Services and TaskDefinitions.

    Walks the ECS configuration attached to ``stack.resource`` and adds, in order:
    TaskDefinition resources (with environment variables, ECR image refs,
    awslogs LogConfiguration and Secrets Manager secrets), then Service
    resources (with optional Cloud Map service discovery, target-group
    Parameters and per-service Outputs).

    :param stack: Stack whose ``resource`` is the ECS services config — TODO confirm exact model type
    :param paco_ctx: paco project context, passed to the base template class
    :param task_execution_role: role model object; ``get_arn()`` supplies the
        TaskDefinition ExecutionRoleArn via a Parameter
    """
    ecs_config = stack.resource
    super().__init__(stack, paco_ctx)
    self.set_aws_name('ECS Services', self.resource_group_name, self.resource.name)
    self.init_template('Elastic Container Service (ECS) Services and TaskDefinitions')
    # Disabled config still produces an (empty) template — bail out early.
    if not ecs_config.is_enabled():
        return

    # Task Execution Role
    task_execution_role_param = self.create_cfn_parameter(
        name='TaskExecutionRole',
        param_type='String',
        description='Task Execution Role',
        value=task_execution_role.get_arn(),
    )

    # TaskDefinitions
    for task in ecs_config.task_definitions.values():
        task_dict = task.cfn_export_dict
        task_dict['ExecutionRoleArn'] = troposphere.Ref(task_execution_role_param)
        # index tracks our position in task_dict['ContainerDefinitions'],
        # which is assumed to be ordered the same as task.container_definitions
        index = 0
        # DependsOn accumulator stashed on the model object; also read below
        # when the TaskDefinition resource is created.
        task._depends_on = []
        for container_definition in task.container_definitions.values():
            # ContainerDefinition Environment variables
            for env_pair in container_definition.environment:
                key = env_pair.name
                value = env_pair.value
                # only paco refs are passed as Parameters to avoid tripping the 60 Parameter CloudFormation limit
                if references.is_ref(value):
                    if type(value) == type(str()):
                        param_type = 'String'
                    elif type(value) == type(int()) or type(value) == type(float()):
                        param_type = 'Number'
                    else:
                        raise UnsupportedCloudFormationParameterType(
                            "Can not cast {} of type {} to a CloudFormation Parameter type.".format(
                                value, type(value)
                            )
                        )
                    param_name = self.create_cfn_logical_id(f'{task.name}{container_definition.name}{key}')
                    environment_param = self.create_cfn_parameter(
                        param_type=param_type,
                        name=param_name,
                        description=f'Environment variable for container definition {container_definition.name} for task definition {task.name}',
                        value=value,
                    )
                    # Replace the raw ref with a Ref to the new Parameter.
                    value = troposphere.Ref(environment_param)
                if 'Environment' not in task_dict['ContainerDefinitions'][index]:
                    task_dict['ContainerDefinitions'][index]['Environment'] = []
                task_dict['ContainerDefinitions'][index]['Environment'].append({'Name': key, 'Value': value})

            # Image can be a paco.ref to an ECR Repository
            if references.is_ref(container_definition.image):
                param_name = self.create_cfn_logical_id(f'{task.name}{container_definition.name}Image')
                image_arn_param = self.create_cfn_parameter(
                    param_type='String',
                    name=param_name,
                    description=f'Image used to start the container.',
                    value=container_definition.image + '.arn',
                )
                # The ECR URL needs to break apart the ARN and re-assemble it, as the
                # URL is not provided as a Stack Output :(
                # ARN layout assumed: arn:aws:ecr:<region>:<account>:repository/<repo-name>
                # (region = colon-field 3, account = colon-field 4) — TODO confirm
                task_dict['ContainerDefinitions'][index]['Image'] = troposphere.Join(
                    ':', [
                        troposphere.Join(
                            '/', [
                                # domain portion: aws_account_id.dkr.ecr.region.amazonaws.com
                                troposphere.Join(
                                    '.', [
                                        troposphere.Select(4, troposphere.Split(':', troposphere.Ref(image_arn_param))),  # account id
                                        'dkr',
                                        'ecr',
                                        troposphere.Select(3, troposphere.Split(':', troposphere.Ref(image_arn_param))),  # region
                                        'amazonaws',
                                        'com',
                                    ]
                                ),
                                troposphere.Select(1, troposphere.Split('/', troposphere.Ref(image_arn_param)))  # ecr-repo-name
                            ]
                        ),
                        container_definition.image_tag  # image tag
                    ]
                )
            else:
                # Plain image string (e.g. a public registry URL) passes through as-is.
                task_dict['ContainerDefinitions'][index]['Image'] = container_definition.image

            if getattr(container_definition, 'logging') != None:
                task_dict['ContainerDefinitions'][index]['LogConfiguration'] = {}
                log_dict = task_dict['ContainerDefinitions'][index]['LogConfiguration']
                log_dict['LogDriver'] = container_definition.logging.driver
                # Only awslogs supported for now
                if container_definition.logging.driver == 'awslogs':
                    log_dict['Options'] = {}
                    log_dict['Options']['awslogs-region'] = troposphere.Ref('AWS::Region')
                    prefixed_log_group_name = prefixed_name(container_definition, task.name)
                    log_group_resource = self.add_log_group(prefixed_log_group_name, container_definition.logging.expire_events_after_days)
                    log_dict['Options']['awslogs-group'] = troposphere.Ref(log_group_resource)
                    # TaskDefinition must wait for its LogGroup to exist.
                    task._depends_on.append(log_group_resource)
                    log_dict['Options']['awslogs-stream-prefix'] = container_definition.name
            index += 1

        # Setup Secrets
        for task_dict_container_def in task_dict['ContainerDefinitions']:
            if 'Secrets' in task_dict_container_def:
                for secrets_pair in task_dict_container_def['Secrets']:
                    # Secrets ARN Parameters: hash the ref so the same secret
                    # reused across containers yields one Parameter name.
                    name_hash = md5sum(str_data=secrets_pair['ValueFrom'])
                    secret_param_name = 'TaskDefinitionSecretArn'+name_hash
                    secret_param = self.create_cfn_parameter(
                        param_type='String',
                        name=secret_param_name,
                        description='The arn of the Secrets Manger Secret.',
                        value=secrets_pair['ValueFrom']+'.arn'
                    )
                    # Sentinel string; presumably replaced with a real Ref when the
                    # dict is serialized — TODO confirm where it is consumed.
                    secrets_pair['ValueFrom'] = '!ManualTroposphereRef '+secret_param_name

        task_res = troposphere.ecs.TaskDefinition.from_dict(
            self.create_cfn_logical_id('TaskDefinition' + task.name),
            task_dict,
        )
        task_res.DependsOn = task._depends_on
        self.template.add_resource(task_res)
        # Stash the resource on the model so the Services loop below can
        # Ref it when resolving service_dict['TaskDefinition'].
        task._troposphere_res = task_res

    # Cluster Param
    cluster_param = self.create_cfn_parameter(
        name='Cluster',
        param_type='String',
        description='Cluster Name',
        value=ecs_config.cluster + '.name',
    )

    # Services
    # ToDo: allow multiple PrivateDnsNamespaces?
    # e.g. if multiple ECSServices want to participate in the same PrivateDnsNamespace?
    if ecs_config.service_discovery_namespace_name != '':
        private_dns_vpc_param = self.create_cfn_parameter(
            param_type='String',
            name='PrivateDnsNamespaceVpc',
            description='The Vpc for the Service Discovery Private DNS Namespace.',
            # First four parts of the paco ref locate the netenv scope; the VPC id
            # ref is built from that — TODO confirm ref layout.
            value='paco.ref ' + '.'.join(ecs_config.paco_ref_parts.split('.')[:4]) + '.network.vpc.id'
        )
        private_dns_namespace_res = troposphere.servicediscovery.PrivateDnsNamespace(
            title=self.create_cfn_logical_id(f'DiscoveryService{ecs_config.service_discovery_namespace_name}'),
            Name=ecs_config.service_discovery_namespace_name,
            Vpc=troposphere.Ref(private_dns_vpc_param),
        )
        self.template.add_resource(private_dns_namespace_res)

    for service in ecs_config.services.values():
        service_dict = service.cfn_export_dict

        # Service Discovery
        # NOTE(review): assumes a hostname implies the namespace above was
        # created; otherwise private_dns_namespace_res is unbound here.
        if service.hostname != None:
            service_discovery_res = troposphere.servicediscovery.Service(
                title=self.create_cfn_logical_id(f'DiscoveryService{service.name}'),
                DnsConfig=troposphere.servicediscovery.DnsConfig(
                    DnsRecords=[
                        # troposphere.servicediscovery.DnsRecord(
                        #     TTL='60',
                        #     Type='A'
                        # ),
                        troposphere.servicediscovery.DnsRecord(
                            TTL='60',
                            Type='SRV'
                        )
                    ]
                ),
                HealthCheckCustomConfig=troposphere.servicediscovery.HealthCheckCustomConfig(FailureThreshold=float(1)),
                NamespaceId=troposphere.Ref(private_dns_namespace_res),
                Name=service.name,
            )
            service_discovery_res.DependsOn = [private_dns_namespace_res]
            self.template.add_resource(service_discovery_res)
            service_dict['ServiceRegistries'] = []
            for load_balancer in service.load_balancers:
                service_registry_dict = {
                    'RegistryArn': troposphere.GetAtt(service_discovery_res, 'Arn'),
                    'ContainerName': load_balancer.container_name,
                    'ContainerPort': load_balancer.container_port,
                }
                # ToDo: add Port when needed ... 'Port': ?,
                service_dict['ServiceRegistries'].append(service_registry_dict)

        # convert TargetGroup ref to a Parameter
        lb_idx = 0
        if 'LoadBalancers' in service_dict:
            for lb in service_dict['LoadBalancers']:
                target_group_ref = lb['TargetGroupArn']
                tg_param = self.create_cfn_parameter(
                    name=self.create_cfn_logical_id(f'TargetGroup{service.name}{lb_idx}'),
                    param_type='String',
                    description='Target Group ARN',
                    value=target_group_ref + '.arn',
                )
                lb['TargetGroupArn'] = troposphere.Ref(tg_param)
                lb_idx += 1

        # Replace TaskDefinition name with a TaskDefinition ARN
        if 'TaskDefinition' in service_dict:
            service_dict['TaskDefinition'] = troposphere.Ref(
                ecs_config.task_definitions[service_dict['TaskDefinition']]._troposphere_res
            )
        service_dict['Cluster'] = troposphere.Ref(cluster_param)
        service_res = troposphere.ecs.Service.from_dict(
            self.create_cfn_logical_id('Service' + service.name),
            service_dict
        )

        # Outputs
        self.create_output(
            title=service_res.title + 'Name',
            description="Service Name",
            value=troposphere.GetAtt(service_res, 'Name'),
            ref=service.paco_ref_parts + ".name"
        )
        self.template.add_resource(service_res)