def add_ui_customizations_cache(self, hook, cup):
    """Return a cache-id string for a UserPool's UI customizations.

    Concatenates the md5 of the logo file (bytes) and of the CSS file (str),
    so the cache id changes whenever either asset changes. Returns '' when
    neither asset is configured.
    """
    value = ''
    ui = cup.ui_customizations
    # `is not None`: identity comparison is the Python idiom (was `!= None`)
    if ui.logo_file is not None:
        value += md5sum(bytes_data=ui.logo_file)
    if ui.css_file is not None:
        value += md5sum(str_data=ui.css_file)
    return value
def is_changed(self, capacity_provider_info):
    """Return True when the local ECS capacity provider settings differ from AWS.

    Compares target_capacity and the min/max scaling step sizes against the
    values in `capacity_provider_info` (a `describe_capacity_providers` item).

    The original implementation built an f-string from each side and compared
    md5 digests; comparing the stringified values field-by-field is equivalent
    (each f-string field is `str(value)`) while avoiding the pointless hashing
    and any theoretical hash-collision false-negative. The `self.asg.paco_ref`
    component was identical on both sides and never affected the result.
    """
    managed_scaling = capacity_provider_info['autoScalingGroupProvider']['managedScaling']
    local_settings = (
        self.capacity_provider.target_capacity,
        self.capacity_provider.minimum_scaling_step_size,
        self.capacity_provider.maximum_scaling_step_size,
    )
    aws_settings = (
        managed_scaling['targetCapacity'],
        managed_scaling['minimumScalingStepSize'],
        managed_scaling['maximumScalingStepSize'],
    )
    # str() both sides to preserve the original f-string comparison semantics
    # (e.g. local "10" vs AWS 10 still compares equal).
    return [str(v) for v in local_settings] != [str(v) for v in aws_settings]
def provision_ecs_capacity_provider_cache(self, hook, asg):
    "Cache id for the ECS capacity provider settings of an ASG"
    provider = asg.ecs.capacity_provider
    # Any change to these fields must produce a new cache id
    cache_fields = (
        asg.paco_ref,
        provider.is_enabled(),
        provider.target_capacity,
        provider.minimum_scaling_step_size,
        provider.maximum_scaling_step_size,
    )
    return md5sum(str_data='-'.join(str(field) for field in cache_fields))
def init_lambda_code(paco_buckets, resource, src, account_ctx, aws_region, is_zip=False):
    "Creates an S3 Bucket and uploads an artifact only if one does not yet exist"
    artifact_prefix = f'Paco/LambdaArtifacts/{resource.paco_ref_parts}'
    if is_zip:
        # src is already a Zip file: hash it directly
        zip_output = src
        md5_hash = md5sum(src)
    else:
        # Zip the source directory and hash the resulting archive
        src_dir = Path(src)
        if not src_dir.exists():
            raise InvalidFilesystemPath(
                f"Source directory for Lambda code does not exist: {src}")
        zip_output, md5_hash = create_zip_artifact(artifact_prefix, src_dir)
    # Content-addressed artifact name: identical code re-uses the same object
    artifact_name = f'Paco/LambdaArtifacts/{resource.paco_ref_parts}-{md5_hash}.zip'
    if paco_buckets.is_object_in_bucket(artifact_name, account_ctx, aws_region):
        bucket_name = paco_buckets.get_bucket_name(account_ctx, aws_region)
    else:
        bucket_name, artifact_name = upload_lambda_code(
            paco_buckets, zip_output, artifact_name, account_ctx, aws_region)
    return bucket_name, artifact_name, md5_hash
def set_alarm_actions_to_cfn_export(self, alarm, cfn_export_dict):
    "Sets the AlarmActions, OKActions and InsufficientDataActions for a Troposphere dict"
    notification_groups = self.paco_ctx.project['resource']['notificationgroups'][alarm.region_name]
    alarm_action_list = []
    for alarm_action in alarm.get_alarm_actions_paco_refs(notification_groups):
        # One Parameter per distinct SNS topic, re-used across alarms
        param_name = 'AlarmAction{}'.format(utils.md5sum(str_data=alarm_action))
        if param_name not in self.alarm_action_param_map:
            self.alarm_action_param_map[param_name] = self.create_cfn_parameter(
                param_type='String',
                name=param_name,
                description='SNSTopic for Alarm to notify.',
                value=alarm_action,
            )
        alarm_action_list.append(troposphere.Ref(self.alarm_action_param_map[param_name]))
    cfn_export_dict['AlarmActions'] = alarm_action_list
    # OK / InsufficientData notifications are opt-in per alarm
    if getattr(alarm, 'enable_ok_actions', False):
        cfn_export_dict['OKActions'] = alarm_action_list
    if getattr(alarm, 'enable_insufficient_data_actions', False):
        cfn_export_dict['InsufficientDataActions'] = alarm_action_list
def create_notification_params(self, alarm):
    "Create a Parameter for each SNS Topic an alarm should notify. Return a list of Refs to those Params."
    # An explicit notification_region overrides the alarm's own region
    region = self.notification_region or alarm.region_name
    topic_arn_refs = [
        self.paco_ctx.project['resource']['snstopics'][region][group].paco_ref + '.arn'
        for group in alarm.notification_groups
    ]
    notification_cfn_refs = []
    for topic_arn_ref in topic_arn_refs:
        # One Parameter per distinct topic, re-used across alarms
        param_name = 'Notification{}'.format(utils.md5sum(str_data=topic_arn_ref))
        if param_name not in self.notification_param_map:
            self.notification_param_map[param_name] = self.create_cfn_parameter(
                param_type='String',
                name=param_name,
                description='SNS Topic to notify',
                value=topic_arn_ref,
                min_length=1, # prevent borked empty values from breaking notification
            )
        notification_cfn_refs.append(troposphere.Ref(self.notification_param_map[param_name]))
    return notification_cfn_refs
def set_alarm_actions_to_cfn_export(self, alarm, cfn_export_dict):
    "Sets the AlarmActions, OKActions and InsufficientDataActions for a Troposphere dict"
    alarm_account = get_parent_by_interface(alarm, schemas.IResource).get_account()
    computed_sns = self.paco_ctx.project['resource']['sns'].computed
    # Fail fast when no SNS topics exist for this account/region combination
    if alarm_account.name not in computed_sns.keys() or \
            alarm.region_name not in computed_sns[alarm_account.name].keys():
        raise MissingSNSTopics(
            f'Could not find SNS topics for account "{alarm_account.name}" in region "{alarm.region_name}"'
        )
    notification_groups = computed_sns[alarm_account.name][alarm.region_name]
    alarm_action_list = []
    for alarm_action in alarm.get_alarm_actions_paco_refs(notification_groups):
        # One Parameter per distinct SNS topic, re-used across alarms
        param_name = 'AlarmAction{}'.format(utils.md5sum(str_data=alarm_action))
        if param_name not in self.alarm_action_param_map:
            self.alarm_action_param_map[param_name] = self.create_cfn_parameter(
                param_type='String',
                name=param_name,
                description='SNSTopic for Alarm to notify.',
                value=alarm_action,
            )
        alarm_action_list.append(troposphere.Ref(self.alarm_action_param_map[param_name]))
    cfn_export_dict['AlarmActions'] = alarm_action_list
    # OK / InsufficientData notifications are opt-in per alarm
    if getattr(alarm, 'enable_ok_actions', False):
        cfn_export_dict['OKActions'] = alarm_action_list
    if getattr(alarm, 'enable_insufficient_data_actions', False):
        cfn_export_dict['InsufficientDataActions'] = alarm_action_list
def stack_hook_eventsrule_state_cache_id(self, hook, config):
    "Cache id derived from the state config of every event-pattern source"
    state_list = [
        self.gen_state_config(source)
        for source in self.resource.event_pattern.source
    ]
    return utils.md5sum(str_data=json.dumps(state_list))
def iam_user_create_access_key(self, username, key_num, key_version, iam_client, sdb_client):
    "Create an IAM access key for a user and record its version/number in SimpleDB"
    sdb_item_name = md5sum(str_data=username)
    new_key = iam_client.create_access_key(UserName=username)['AccessKey']
    access_key_id = new_key['AccessKeyId']
    # Track the key's version and slot number in SimpleDB, keyed by key id
    for attribute_suffix, attribute_value in (('Version', key_version), ('KeyNum', key_num)):
        self.put_sdb_attribute(
            sdb_client,
            self.iam_user_access_keys_sdb_domain,
            sdb_item_name,
            access_key_id + attribute_suffix,
            attribute_value,
        )
    print("{}: Created Access Key {}: Key Id : {}".format(username, key_num, access_key_id))
    # NOTE(review): this prints the secret access key to stdout — confirm intended
    print("{}: {}: Secret Key: {}".format(username, key_num, new_key['SecretAccessKey']))
def gen_policy_name(self, policy_id):
    "Build a CloudFormation-safe managed policy name prefixed with a hash of the policy context ref"
    context_hash = md5sum(str_data=self.policy_context['ref'])[:8].upper()
    return self.create_resource_name_join(
        name_list=[context_hash, policy_id],
        separator='-',
        camel_case=False,
        filter_id='IAM.ManagedPolicy.ManagedPolicyName',
    )
def gen_iam_role_name(self, role_type, role_ref, role_id):
    "Generate a name valid in CloudFormation"
    # 8-char hash prefix keeps equal role ids from distinct refs unique
    ref_hash = md5sum(str_data=role_ref)[:8].upper()
    return self.create_resource_name_join(
        name_list=[ref_hash, role_type[0], role_id],
        separator='-',
        camel_case=True,
        filter_id='IAM.Role.RoleName',
    )
def gen_policy_name(self, policy_name):
    "Generate a name valid in CloudFormation"
    # 8-char hash prefix keeps equal policy names from distinct refs unique
    ref_hash = md5sum(str_data=self.policy.paco_ref_parts)[:8].upper()
    return self.create_resource_name_join(
        name_list=[ref_hash, policy_name],
        separator='-',
        camel_case=False,
        filter_id='IAM.ManagedPolicy.ManagedPolicyName',
    )
def iam_user_access_keys_hook_cache_id(self, hook, iamuser):
    "Cache value for AWS Access Key"
    cache_data = "AccessKeysCacheId"
    access = iamuser.programmatic_access
    # `is not None`: identity comparison is the Python idiom (was `!= None`)
    if access is not None:
        # Fold the enabled flag and both key versions into the cache id
        cache_data += str(access.enabled)
        cache_data += str(access.access_key_1_version)
        cache_data += str(access.access_key_2_version)
    return md5sum(str_data=cache_data)
def iam_user_access_keys_hook_cache_id(self, hook, user_config):
    "Cache value for an IAM user's AWS Access Key configuration"
    cache_data = "AccessKeysCacheId"
    # `is not None`: identity comparison is the Python idiom (was `!= None`)
    if user_config.programmatic_access is not None:
        access_config = user_config.programmatic_access
        # Fold the enabled flag and both key versions into the cache id
        cache_data += str(access_config.enabled)
        cache_data += str(access_config.access_key_1_version)
        cache_data += str(access_config.access_key_2_version)
    return md5sum(str_data=cache_data)
def __init__(self, stack, paco_ctx, awslambda):
    """Template granting API Gateway permission to invoke a Lambda in another account.

    A lambda:InvokeFunction Permission is emitted for each API Gateway method
    that integrates with `awslambda`, but only when the API Gateway and the
    Lambda live in different accounts.
    """
    super().__init__(stack, paco_ctx)
    # NOTE(review): 'Lamda' typo is part of the stack name — renaming it would
    # replace existing stacks, so it is preserved here.
    self.set_aws_name('ApiGatewayLamdaPermission', self.resource_group_name, self.resource_name)
    self.init_template('Cross-account Api Gateway Lambda Permission')
    apigateway = self.resource
    api_gateway_id_param = self.create_cfn_parameter(
        name=self.create_cfn_logical_id('ApiGatewayRestApiId'),
        param_type='String',
        description='API Gateway Rest API Id',
        value=apigateway.paco_ref + '.id',
    )
    lambda_arn_param = self.create_cfn_parameter(
        name=self.create_cfn_logical_id('LambdaArn'),
        param_type='String',
        description='Lambda Arn',
        value=awslambda.paco_ref + '.arn',
    )
    # Lambda Permission for cross-account API Gateway invocation
    for method in apigateway.methods.values():
        if method.integration != None and method.integration.integration_lambda != None:
            if awslambda.paco_ref == method.integration.integration_lambda:
                # Only needed when the gateway and lambda are in different accounts
                if apigateway.get_account().name != awslambda.get_account().name:
                    # Grant Cross-Account API Gateway permission
                    path_part = ''
                    if method.resource_name:
                        name_parts = method.resource_name.split('.')
                        resource = method.get_resource()
                        if len(name_parts) > 1:
                            # child resource: walk up the model tree to the
                            # top-most IApiGatewayResource ancestor
                            last_resource = resource
                            while schemas.IApiGatewayResource.providedBy(resource):
                                last_resource = resource
                                resource = resource.__parent__.__parent__
                            path_part = last_resource.path_part + '/*' # add /* to match all child resource
                        else:
                            # parent resource
                            path_part = resource.path_part
                    lambda_permission_resource = troposphere.awslambda.Permission(
                        title='ApiGatewayRestApiMethod' + md5sum(str_data=method.paco_ref),
                        Action="lambda:InvokeFunction",
                        FunctionName=troposphere.Ref(lambda_arn_param),
                        Principal='apigateway.amazonaws.com',
                        SourceArn=troposphere.Join('', [
                            "arn:aws:execute-api:",
                            awslambda.region_name, # lambda region
                            ":",
                            apigateway.get_account().account_id, # account id
                            ":",
                            troposphere.Ref(api_gateway_id_param),
                            f"/*/{method.http_method}/{path_part}",
                        ])
                    )
                    self.template.add_resource(lambda_permission_resource)
def codecommit_post_stack_hook_cache_id(self, hook, config):
    "Cache id built from every public SSH key across all CodeCommit repo users"
    ssh_keys = []
    for repo_group in config.values():
        for repo_config in repo_group.values():
            if repo_config.users is None:
                continue
            for user_config in repo_config.users.values():
                if user_config.public_ssh_key is not None:
                    ssh_keys.append(user_config.public_ssh_key)
    return md5sum(str_data=''.join(ssh_keys))
def gen_iam_role_name(self, role_type, role):
    "Generate a name valid in CloudFormation"
    # Globally-named roles skip the hash prefix entirely
    if role.global_role_name == True:
        return f'{role.role_name}-{role_type[0]}'
    ref_hash = md5sum(str_data=role.paco_ref_parts)[:8].upper()
    return self.create_resource_name_join(
        name_list=[ref_hash, role_type[0], self.resource.name + '-' + role.name],
        separator='-',
        camel_case=True,
        filter_id='IAM.Role.RoleName',
    )
def create_zip_artifact(artifact_prefix, src_dir):
    """Create a zip file from a directory (or single file) and return (path, md5).

    Returns:
        tuple: (zip_output path, md5 hash of the archive)
    """
    # patch make_archive so that it includes symbolic links
    # ToDo: excludes __pycache__ - make the excluded files depend upon Lambda runtime
    # NOTE(review): mutates shutil's private archive-format registry
    shutil._ARCHIVE_FORMATS['zip'] = (patched_make_zipfile, [], "ZIP file")
    zip_output = tempfile.gettempdir() + os.sep + artifact_prefix + '.zip'
    if src_dir.is_file():
        # Bug fix: use a context manager so the ZipFile is closed and its
        # contents flushed to disk before the file is hashed below. The
        # original left the handle open, so the archive could be hashed
        # incomplete (and the descriptor leaked).
        with zipfile.ZipFile(zip_output, mode='w') as zip_file:
            zip_file.write(src_dir, basename(src_dir))
    else:
        # NOTE(review): make_archive normally appends another '.zip' to its
        # base_name; confirm patched_make_zipfile writes to zip_output itself.
        shutil.make_archive(zip_output, 'zip', str(src_dir))
    md5_hash = md5sum(zip_output)
    return zip_output, md5_hash
def __init__(
    self,
    stack,
    paco_ctx,
):
    """Secrets Manager CloudFormation template.

    Creates a SecretsManager Secret resource (plus an '.arn' Output) for every
    enabled secret declared for this stack's account. The template is marked
    disabled when no secrets apply.
    """
    secrets_config = stack.resource
    config_ref = secrets_config.paco_ref_parts
    super().__init__(
        stack,
        paco_ctx,
        iam_capabilities=["CAPABILITY_NAMED_IAM"],
    )
    self.set_aws_name('SecretsManager')
    self.init_template('Secrets Manager')
    is_enabled = False
    account_ref = self.account_ctx.paco_ref
    # secrets_config nests app -> group -> secret
    for secret_app in secrets_config.values():
        for secret_group in secret_app.values():
            for secret_name in secret_group.keys():
                secret_config = secret_group[secret_name]
                if secret_config.is_enabled() == False:
                    continue
                # Secrets only created in the accounts they are declared/assumed
                if secret_config._account_ref != account_ref:
                    continue
                is_enabled = True
                # hash keeps the CFN logical id unique and valid
                secret_hash = utils.md5sum(
                    str_data=secret_config.paco_ref_parts)
                # Secret resource
                cfn_export_dict = {'Name': secret_config.paco_ref_parts}
                if secret_config.generate_secret_string.enabled:
                    cfn_export_dict[
                        'GenerateSecretString'] = secret_config.generate_secret_string.cfn_export_dict
                else:
                    # Secret will be changed later
                    cfn_export_dict['SecretString'] = 'placeholder'
                secret_resource = troposphere.secretsmanager.Secret.from_dict(
                    self.create_cfn_logical_id('Secret' + secret_hash),
                    cfn_export_dict)
                self.template.add_resource(secret_resource)
                # Secret resource Output
                self.create_output(
                    title=self.create_cfn_logical_id('Secret' + secret_hash + 'Arn'),
                    value=troposphere.Ref(secret_resource),
                    ref=secret_config.paco_ref_parts + '.arn')
    self.set_enabled(is_enabled)
def resource_name_filter(self, name, filter_id, hash_long_names):
    """Checks a name against a filter and raises a StackException if it is not a valid AWS name.

    When `hash_long_names` is True, an over-long name is rescued instead of
    rejected: it is replaced by an 8-char md5 prefix plus the name's tail.
    Returns the (possibly rewritten) name.
    """
    # Duplicated in paco.models.base.Resource
    message = None
    max_name_len = None
    if filter_id in [
            'EC2.ElasticLoadBalancingV2.LoadBalancer.Name',
            'EC2.ElasticLoadBalancingV2.TargetGroup.Name']:
        if len(name) > 32:
            max_name_len = 32
            # Bug fix: removed a trailing comma that made this message a tuple
            message = "Name must not be longer than 32 characters."
        elif filter_id.find('LoadBalancer') != -1 and name.startswith('internal-'):
            message = "Name must not start with 'internal-'"
        elif name[-1] == '-' or name[0] == '-':
            message = "Name must not begin or end with a dash."
    elif filter_id in [
            'IAM.Role.RoleName',
            'IAM.ManagedPolicy.ManagedPolicyName']:
        if len(name) > 255:
            max_name_len = 255
            message = "Name must not be longer than 255 characters."
    elif filter_id == 'IAM.Policy.PolicyName':
        if len(name) > 128:
            max_name_len = 128
            message = "Name must not be longer than 128 characters."
    elif filter_id == 'ElastiCache.ReplicationGroup.ReplicationGroupId':
        if len(name) > 40:
            # Bug fix: was 255, so hash-truncation could still emit a name
            # longer than the 40-char limit stated in the message below.
            max_name_len = 40
            message = "ReplicationGroupId must be 40 characters or less"
    elif filter_id == 'SecurityGroup.GroupName':
        pass
    else:
        message = 'Unknown filter_id'

    # Rescue over-long names: 8-char md5 prefix + '-' + tail of the name
    if max_name_len is not None and hash_long_names == True:
        message = None
        name_hash = md5sum(str_data=name)[:8].upper()
        name = name_hash + '-' + name[((max_name_len - 9) * -1):]

    if message is not None:
        raise StackException(
            PacoErrorCode.Unknown,
            message="{}: {}: {}: {}".format(
                filter_id,
                self.config_ref,
                message,
                name,
            ))
    return name
def create_iam_resource_name(self, name_list, filter_id=None):
    "Join name parts into an IAM resource name, hash-truncating when over 64 chars"
    full_name = self.create_resource_name_join(
        name_list=name_list,
        separator='-',
        camel_case=True,
        filter_id=filter_id,
    )
    if len(full_name) <= 64:
        return full_name
    # Too long for IAM: 8-char md5 prefix + '-' + the name's tail
    name_hash = md5sum(str_data=full_name)[:8].upper()
    # len('AABBCCDD-') plus one spare char, as in the original layout
    tail_len = 64 - (len(name_hash + '-') + 1)
    return name_hash + '-' + full_name[-tail_len:]
def create_notification_param(self, group):
    "Create a CFN Parameter for a Notification Group"
    sns_computed = self.paco_ctx.project['resource']['sns'].computed
    notification_ref = sns_computed[self.account_ctx.name][self.stack.aws_region][group].paco_ref + '.arn'
    param_name = 'Notification{}'.format(utils.md5sum(str_data=notification_ref))
    # Re-use existing Parameter or create new one
    if param_name not in self.notification_groups:
        self.notification_groups[param_name] = self.create_cfn_parameter(
            param_type='String',
            name=param_name,
            description='SNS Topic to notify',
            value=notification_ref,
            min_length=1, # prevent borked empty values from breaking notification
        )
    return self.notification_groups[param_name]
def init_manual_approval_action(self, template, action_config): self.manual_approval_is_enabled = action_config.is_enabled() # Manual Approval Deploy Action subscription_list = [] for approval_email in action_config.manual_approval_notification_email: email_hash = utils.md5sum(str_data=approval_email) manual_approval_notification_email_param = self.create_cfn_parameter( param_type='String', name='ManualApprovalNotificationEmail'+email_hash, description='Email to send notifications to when a deployment requires approval.', value=approval_email, ) subscription_list.append( troposphere.sns.Subscription( Endpoint=troposphere.Ref(manual_approval_notification_email_param), Protocol = 'email' ) ) manual_approval_sns_res = troposphere.sns.Topic( title = 'ManualApprovalSNSTopic', template=template, Condition = 'ManualApprovalIsEnabled', TopicName = troposphere.Sub('${ResourceNamePrefix}-Approval'), Subscription = subscription_list ) manual_deploy_action = troposphere.codepipeline.Actions( Name='Approval', ActionTypeId = troposphere.codepipeline.ActionTypeId( Category = 'Approval', Owner = 'AWS', Version = '1', Provider = 'Manual' ), Configuration = { 'NotificationArn': troposphere.Ref(manual_approval_sns_res), }, RunOrder = action_config.run_order ) manual_deploy_action = troposphere.If( 'ManualApprovalIsEnabled', manual_deploy_action, troposphere.Ref('AWS::NoValue') ) return manual_deploy_action
def __init__(self, paco_ctx, account_ctx, aws_region, stack_group, stack_tags, secrets_config, config_ref):
    """Secrets Manager CloudFormation template (legacy constructor signature).

    Creates a SecretsManager Secret resource (plus an '.arn' Output) for every
    enabled secret in `secrets_config`. The template is marked disabled when
    no secrets apply.
    """
    super().__init__(paco_ctx,
                     account_ctx,
                     aws_region,
                     config_ref=config_ref,
                     iam_capabilities=["CAPABILITY_NAMED_IAM"],
                     stack_group=stack_group,
                     stack_tags=stack_tags)
    self.set_aws_name('SecretsManager')
    self.init_template('Secrets Manager')
    self.paco_ctx.log_action_col("Init", "Secrets", "Manager")
    is_enabled = False
    # secrets_config nests app -> group -> secret
    for secret_app in secrets_config.values():
        for secret_group in secret_app.values():
            for secret_name in secret_group.keys():
                secret_config = secret_group[secret_name]
                if secret_config.is_enabled() == False:
                    continue
                is_enabled = True
                # hash keeps the CFN logical id unique and valid
                secret_hash = utils.md5sum(
                    str_data=secret_config.paco_ref_parts)
                # Secret resource
                cfn_export_dict = {'Name': secret_config.paco_ref_parts}
                if secret_config.generate_secret_string.enabled:
                    cfn_export_dict[
                        'GenerateSecretString'] = secret_config.generate_secret_string.cfn_export_dict
                else:
                    # Secret will be changed later
                    cfn_export_dict['SecretString'] = 'placeholder'
                secret_resource = troposphere.secretsmanager.Secret.from_dict(
                    self.create_cfn_logical_id('Secret' + secret_hash),
                    cfn_export_dict)
                self.template.add_resource(secret_resource)
                # Secret resource Output
                self.create_output(
                    title=self.create_cfn_logical_id('Secret' + secret_hash + 'Arn'),
                    value=troposphere.Ref(secret_resource),
                    ref=secret_config.paco_ref_parts + '.arn')
    self.enabled = is_enabled
    self.set_template()
def iam_user_delete_access_key(self, username, key_config, iam_client, sdb_client):
    "Delete a user's IAM access key and remove its tracking attributes from SimpleDB"
    access_key_id = key_config['access_key_id']
    sdb_item_name = md5sum(str_data=username)
    iam_client.delete_access_key(
        UserName=username,
        AccessKeyId=access_key_id,
    )
    # Remove the version and key-number attributes recorded at creation time
    for attribute_name, attribute_value in (
        (access_key_id + 'Version', key_config['version']),
        (access_key_id + 'KeyNum', key_config['key_num']),
    ):
        self.delete_sdb_attribute(
            sdb_client,
            self.iam_user_access_keys_sdb_domain,
            sdb_item_name,
            attribute_name,
            attribute_value,
        )
    print("{}: Deleted Access Key {}: Key Id : {}".format(username, key_config['key_num'], access_key_id))
def create_group_param_ref(self, group_ref, template):
    """
    Creates a Security Group Id parameter and returns a Ref() to it.
    It caches the parameter to allow multiple references from a single Parameter.
    """
    # legacy_flag: aim_name_2019_11_28 - hash with aim.ref instead of paco.ref
    if self.paco_ctx.legacy_flag('aim_name_2019_11_28') == True:
        hash_ref = 'aim' + group_ref[4:]
    else:
        hash_ref = group_ref
    group_ref_hash = utils.md5sum(str_data=hash_ref)
    if group_ref_hash not in self.source_group_param_cache:
        self.source_group_param_cache[group_ref_hash] = self.create_cfn_parameter(
            param_type='AWS::EC2::SecurityGroup::Id',
            name='SourceGroupId' + group_ref_hash,
            description='Source Security Group - ' + hash_ref,
            value=group_ref + '.id',
        )
    return troposphere.Ref(self.source_group_param_cache[group_ref_hash])
def create_param_from_ref(self, group_ref, param_type, param_name, param_description, ref_att):
    """
    Creates a Security Group Id parameter and returns a Ref() to it.
    It caches the parameter to allow multiple references from a single Parameter.
    """
    # legacy_flag: aim_name_2019_11_28 - hash with aim.ref instead of paco.ref
    if self.paco_ctx.legacy_flag('aim_name_2019_11_28') == True:
        hash_ref = 'aim' + group_ref[4:]
    else:
        hash_ref = group_ref
    group_ref_hash = utils.md5sum(str_data=hash_ref)
    if group_ref_hash not in self.troposphere_param_cache:
        self.troposphere_param_cache[group_ref_hash] = self.create_cfn_parameter(
            param_type=param_type,
            name=param_name + group_ref_hash,
            description=f'{param_description} - {hash_ref}',
            value=f'{group_ref}.{ref_att}',
        )
    return troposphere.Ref(self.troposphere_param_cache[group_ref_hash])
def asg_hook_update_cloudwatch_agent_cache(self, hook, asg):
    # (original docstring said "Cache method for ECS ASG" — copy-paste; this
    # hashes the Windows CloudWatch Agent config)
    "Cache id for the Windows CloudWatch Agent configuration"
    agent_config = self.gen_windows_cloudwatch_agent_config()
    return md5sum(str_data=agent_config)
def __init__(
    self,
    stack,
    paco_ctx,
    record_set_name,
    record_set_config,
):
    """Route53 RecordSet CloudFormation template.

    Builds a single RecordSet resource. 'Alias' records become type-A records
    with an AliasTarget; all other types get TTL + ResourceRecords, each
    record exposed through its own CFN Parameter.
    """
    # record_set_name may itself be a paco.ref that resolves to the real name
    if references.is_ref(record_set_name) == True:
        record_set_name = paco_ctx.get_ref(record_set_name)
    super().__init__(stack, paco_ctx)
    hosted_zone_is_private = self.paco_ctx.get_ref(
        record_set_config['dns'].hosted_zone + '.private_hosted_zone')
    aws_name = 'RecordSet'
    if hosted_zone_is_private == True:
        aws_name = aws_name + '-Private'
    self.set_aws_name(aws_name, record_set_name)

    # Troposphere Template Initialization
    self.init_template('Route53 RecordSet: ' + record_set_name)

    # Parameters
    hosted_zone_id = record_set_config['dns'].hosted_zone
    if references.is_ref(record_set_config['dns'].hosted_zone):
        hosted_zone_id = record_set_config['dns'].hosted_zone + '.id'
    hosted_zone_id_param = self.create_cfn_parameter(
        param_type='String',
        name='HostedZoneId',
        description='Record Set Hosted Zone Id',
        value=hosted_zone_id,
    )
    # 'Alias' is not a real Route53 type: emit an A record with an AliasTarget
    record_set_type = record_set_config['record_set_type']
    if record_set_config['record_set_type'] == 'Alias':
        record_set_type = 'A'
    record_set_dict = {
        'HostedZoneId': troposphere.Ref(hosted_zone_id_param),
        'Name': record_set_name,
        'Type': record_set_type
    }

    # Alias
    if record_set_config['record_set_type'] == "Alias":
        alias_hosted_zone_id_param = self.create_cfn_parameter(
            param_type='String',
            name='AliasHostedZoneId',
            description='Hosted Zone Id for the A Alias',
            value=record_set_config['alias_hosted_zone_id'],
        )
        alias_dns_name_param = self.create_cfn_parameter(
            param_type='String',
            name='AliasDNSName',
            description='DNS Name for the A Alias',
            value=record_set_config['alias_dns_name'],
        )
        record_set_dict['AliasTarget'] = {
            'DNSName': troposphere.Ref(alias_dns_name_param),
            'HostedZoneId': troposphere.Ref(alias_hosted_zone_id_param)
        }
    else:
        record_set_dict['TTL'] = record_set_config['dns'].ttl
        record_set_dict['ResourceRecords'] = []
        for resource_record in record_set_config['resource_records']:
            # legacy_flag: aim_name_2019_11_28 - hash with aim.ref instead of paco.ref
            hash_name = resource_record
            if self.paco_ctx.legacy_flag('aim_name_2019_11_28') == True:
                hash_name = 'aim' + hash_name[4:]
            # hash keeps each record's Parameter name unique and CFN-safe
            record_hash = utils.md5sum(str_data=hash_name)
            resource_record_param = self.create_cfn_parameter(
                param_type='String',
                name='ResourceRecord' + record_hash,
                description='Resource Record: ' + hash_name,
                value=resource_record,
            )
            record_set_dict['ResourceRecords'].append(
                troposphere.Ref(resource_record_param))
    record_set_res = troposphere.route53.RecordSetType.from_dict(
        self.create_cfn_logical_id_join(['RecordSet']),
        record_set_dict)
    self.template.add_resource(record_set_res)
def __init__(self, paco_ctx, account_ctx, aws_region, stack_group,
             stack_tags, env_ctx, app_id, grp_id, asg_id, asg_config,
             asg_config_ref, role_profile_arn, ec2_manager_user_data_script,
             ec2_manager_cache_id):
    """Build the CloudFormation template for an AutoScalingGroup.

    Creates a LaunchConfiguration and AutoScalingGroup resource plus
    supporting Parameters (AMI, keypair, security groups, subnets,
    capacities), tags for EIP/EFS/EBS attachment, an UpdatePolicy,
    scaling policies with their CloudWatch alarms, and lifecycle hooks.

    NOTE(review): app_id is accepted but not referenced in this method body.
    """
    self.env_ctx = env_ctx
    self.ec2_manager_cache_id = ec2_manager_cache_id
    # Segment stack supplies the subnet list the ASG launches into (see below).
    segment_stack = self.env_ctx.get_segment_stack(asg_config.segment)
    # Super Init:
    super().__init__(
        paco_ctx,
        account_ctx,
        aws_region,
        enabled=asg_config.is_enabled(),
        config_ref=asg_config_ref,
        stack_group=stack_group,
        stack_tags=stack_tags,
        change_protected=asg_config.change_protected
    )
    self.set_aws_name('ASG', grp_id, asg_id)
    self.asg_config = asg_config

    # Troposphere
    self.init_template('AutoScalingGroup: ' + self.ec2_manager_cache_id)
    template = self.template

    # InstanceAMI Parameter is preserved in disabled templates so it can be
    # smoothly disabled/enabled without churning the AMI value.
    if self.asg_config.instance_ami_ignore_changes:
        ignore_changes = True
    else:
        ignore_changes = False
    instance_ami_param = self.create_cfn_parameter(
        param_type='String',
        name='InstanceAMI',
        description='The Amazon Machine Image Id to launch instances with.',
        value=asg_config.instance_ami,
        ignore_changes=ignore_changes,
    )

    # if the network for the ASG is disabled, only use an empty placeholder
    # template (just the AMI parameter) and stop here.
    env_region = get_parent_by_interface(asg_config, schemas.IEnvironmentRegion)
    if not env_region.network.is_enabled():
        self.set_template(template.to_yaml())
        return

    security_group_list_param = self.create_cfn_ref_list_param(
        param_type='List<AWS::EC2::SecurityGroup::Id>',
        name='SecurityGroupList',
        description='List of security group ids to attach to the ASG instances.',
        value=asg_config.security_groups,
        ref_attribute='id',
    )
    instance_key_pair_param = self.create_cfn_parameter(
        param_type='String',
        name='InstanceKeyPair',
        description='The EC2 SSH KeyPair to assign each ASG instance.',
        value=asg_config.instance_key_pair + '.keypair_name',
    )
    launch_config_dict = {
        'AssociatePublicIpAddress': asg_config.associate_public_ip_address,
        'EbsOptimized': asg_config.ebs_optimized,
        'ImageId': troposphere.Ref(instance_ami_param),
        'InstanceMonitoring': asg_config.instance_monitoring,
        'InstanceType': asg_config.instance_type,
        'KeyName': troposphere.Ref(instance_key_pair_param),
        'SecurityGroups': troposphere.Ref(security_group_list_param),
    }

    # BlockDeviceMappings
    if len(asg_config.block_device_mappings) > 0:
        mappings = []
        for bdm in asg_config.block_device_mappings:
            mappings.append(bdm.cfn_export_dict)
        launch_config_dict["BlockDeviceMappings"] = mappings

    # UserData: the EC2 manager bootstrap script is prepended; the ASG's own
    # script has its shebang stripped so the two concatenate into one script.
    user_data_script = ''
    if ec2_manager_user_data_script != None:
        user_data_script += ec2_manager_user_data_script
    if asg_config.user_data_script != '':
        user_data_script += asg_config.user_data_script.replace('#!/bin/bash', '')
    if user_data_script != '':
        # CloudFormation expects UserData as base64-encoded text.
        user_data_64 = base64.b64encode(user_data_script.encode('ascii'))
        user_data_script_param = self.create_cfn_parameter(
            param_type='String',
            name='UserDataScript',
            description='User data script to run at instance launch.',
            value=user_data_64.decode('ascii'),
        )
        launch_config_dict['UserData'] = troposphere.Ref(user_data_script_param)
    if role_profile_arn != None:
        launch_config_dict['IamInstanceProfile'] = role_profile_arn

    # CloudFormation Init
    if asg_config.cfn_init and asg_config.is_enabled():
        launch_config_dict['Metadata'] = troposphere.autoscaling.Metadata(
            asg_config.cfn_init.export_as_troposphere())
        for key, value in asg_config.cfn_init.parameters.items():
            # Only str and int/float cfn-init parameters can be mapped to
            # CloudFormation Parameter types.
            if type(value) == type(str()):
                param_type = 'String'
            elif type(value) == type(int()) or type(value) == type(float()):
                param_type = 'Number'
            else:
                raise UnsupportedCloudFormationParameterType(
                    "Can not cast {} of type {} to a CloudFormation Parameter type."
                    .format(value, type(value))
                )
            # Return value unused; create_cfn_parameter presumably registers
            # the Parameter on the template as a side effect — TODO confirm.
            cfn_init_param = self.create_cfn_parameter(
                param_type=param_type,
                name=key,
                description='CloudFormation Init Parameter {} for ASG {}'.format(
                    key, asg_config.name),
                value=value,
            )

    # Launch Configuration resource
    launch_config_res = troposphere.autoscaling.LaunchConfiguration.from_dict(
        'LaunchConfiguration', launch_config_dict)
    template.add_resource(launch_config_res)

    # Subnets come from the environment's segment stack: either every AZ's
    # subnet list, or a single AZ's subnet.
    subnet_list_ref = 'paco.ref {}'.format(segment_stack.template.config_ref)
    if asg_config.availability_zone == 'all':
        subnet_list_ref += '.subnet_id_list'
    else:
        subnet_list_ref += '.az{}.subnet_id'.format(asg_config.availability_zone)
    asg_subnet_list_param = self.create_cfn_parameter(
        param_type='List<AWS::EC2::Subnet::Id>',
        name='ASGSubnetList',
        description='A list of subnets where the ASG will launch instances',
        value=subnet_list_ref)

    # Disabled ASGs are scaled to zero rather than deleted.
    min_instances = asg_config.min_instances if asg_config.is_enabled() else 0
    desired_capacity = asg_config.desired_capacity if asg_config.is_enabled() else 0
    desired_capacity_param = self.create_cfn_parameter(
        param_type='String',
        name='DesiredCapacity',
        description='The desired capacity of instances to run in the ASG.',
        value=desired_capacity,
        ignore_changes=self.asg_config.desired_capacity_ignore_changes,
    )
    asg_dict = {
        'AutoScalingGroupName': asg_config.get_aws_name(),
        'DesiredCapacity': troposphere.Ref(desired_capacity_param),
        'HealthCheckGracePeriod': asg_config.health_check_grace_period_secs,
        'LaunchConfigurationName': troposphere.Ref(launch_config_res),
        'MaxSize': asg_config.max_instances,
        'MinSize': min_instances,
        'Cooldown': asg_config.cooldown_secs,
        'HealthCheckType': asg_config.health_check_type,
        'TerminationPolicies': asg_config.termination_policies,
        'VPCZoneIdentifier': troposphere.Ref(asg_subnet_list_param),
    }

    # Classic ELB attachment (by name).
    if asg_config.load_balancers != None and len(asg_config.load_balancers) > 0:
        load_balancer_names_param = self.create_cfn_ref_list_param(
            param_type='List<String>',
            name='LoadBalancerNames',
            description='A list of load balancer names to attach to the ASG',
            value=asg_config.load_balancers,
        )
        asg_dict['LoadBalancerNames'] = troposphere.Ref(load_balancer_names_param)

    if asg_config.is_enabled():
        # ALB/NLB Target Group attachment (by ARN); one Parameter per group,
        # keyed by an md5 of the ref so names stay stable and unique.
        if asg_config.target_groups != None and len(asg_config.target_groups) > 0:
            asg_dict['TargetGroupARNs'] = []
            for target_group_arn in asg_config.target_groups:
                target_group_arn_param = self.create_cfn_parameter(
                    param_type='String',
                    name='TargetGroupARNs' + utils.md5sum(str_data=target_group_arn),
                    description='A Target Group ARNs to attach to the ASG',
                    value=target_group_arn + '.arn',
                )
                asg_dict['TargetGroupARNs'].append(troposphere.Ref(target_group_arn_param))

        # Group-level CloudWatch metrics collection.
        if asg_config.monitoring != None and \
                asg_config.monitoring.is_enabled() == True and \
                len(asg_config.monitoring.asg_metrics) > 0:
            asg_dict['MetricsCollection'] = [{
                'Granularity': '1Minute',
                'Metrics': asg_config.monitoring.asg_metrics
            }]

    # ASG Tags (propagated to instances via the third Tag argument)
    asg_dict['Tags'] = [
        troposphere.autoscaling.Tag('Name', asg_dict['AutoScalingGroupName'], True)
    ]

    # EIP: tag instances with the allocation id so an on-instance agent can
    # attach it — presumably consumed by the EC2 manager script; confirm.
    if asg_config.eip != None and asg_config.is_enabled():
        if references.is_ref(asg_config.eip) == True:
            eip_value = asg_config.eip + '.allocation_id'
        else:
            eip_value = asg_config.eip
        eip_id_param = self.create_cfn_parameter(
            param_type='String',
            name='EIPAllocationId',
            description='The allocation Id of the EIP to attach to the instance.',
            value=eip_value,
        )
        asg_dict['Tags'].append(
            troposphere.autoscaling.Tag('Paco-EIP-Allocation-Id',
                                        troposphere.Ref(eip_id_param), True))

    # EFS FileSystemId Tags
    if asg_config.is_enabled():
        for efs_mount in asg_config.efs_mounts:
            # Hash of the target ref keeps Parameter/tag names unique per mount.
            target_hash = utils.md5sum(str_data=efs_mount.target)
            if references.is_ref(efs_mount.target) == True:
                efs_value = efs_mount.target + '.id'
            else:
                efs_value = efs_mount.target
            efs_id_param = self.create_cfn_parameter(
                param_type='String',
                name='EFSId' + target_hash,
                description='EFS Id',
                value=efs_value,
            )
            asg_tag = troposphere.autoscaling.Tag(
                'efs-id-' + target_hash, troposphere.Ref(efs_id_param), True)
            asg_dict['Tags'].append(asg_tag)

        # EBS Volume Id and Device name Tags
        for ebs_volume_mount in asg_config.ebs_volume_mounts:
            if ebs_volume_mount.is_enabled() == False:
                continue
            volume_hash = utils.md5sum(str_data=ebs_volume_mount.volume)
            if references.is_ref(ebs_volume_mount.volume) == True:
                ebs_volume_id_value = ebs_volume_mount.volume + '.id'
            else:
                ebs_volume_id_value = ebs_volume_mount.volume
            # Volume Id
            ebs_volume_id_param = self.create_cfn_parameter(
                param_type='String',
                name='EBSVolumeId' + volume_hash,
                description='EBS Volume Id',
                value=ebs_volume_id_value)
            ebs_volume_id_tag = troposphere.autoscaling.Tag(
                'ebs-volume-id-' + volume_hash,
                troposphere.Ref(ebs_volume_id_param), True)
            asg_dict['Tags'].append(ebs_volume_id_tag)
            # Device-name tag currently disabled:
            #ebs_device_param = self.create_cfn_parameter(
            #    param_type='String',
            #    name='EBSDevice'+volume_hash,
            #    description='EBS Device Name',
            #    value=ebs_volume_mount.device,
            #)
            #ebs_device_tag = troposphere.autoscaling.Tag(
            #    'ebs-device-' + volume_hash,
            #    troposphere.Ref(ebs_device_param),
            #    True
            #)
            #asg_dict['Tags'].append(ebs_device_tag)

    asg_res = troposphere.autoscaling.AutoScalingGroup.from_dict('ASG', asg_dict)
    template.add_resource(asg_res)
    asg_res.DependsOn = launch_config_res

    # Rolling update policy: defaults apply when the ASG is disabled or the
    # configured policy is disabled.
    max_batch_size = 1
    min_instances_in_service = 0
    pause_time = 'PT0S'
    wait_on_resource_signals = False
    if asg_config.is_enabled() == True:
        if asg_config.rolling_update_policy != None:
            if asg_config.rolling_update_policy.is_enabled():
                max_batch_size = asg_config.rolling_update_policy.max_batch_size
                min_instances_in_service = asg_config.rolling_update_policy.min_instances_in_service
                pause_time = asg_config.rolling_update_policy.pause_time
                wait_on_resource_signals = asg_config.rolling_update_policy.wait_on_resource_signals
        else:
            # NOTE(review): reconstructed nesting — this branch is read as the
            # legacy fallback when no rolling_update_policy is configured.
            max_batch_size = asg_config.update_policy_max_batch_size
            min_instances_in_service = asg_config.update_policy_min_instances_in_service
    asg_res.UpdatePolicy = troposphere.policies.UpdatePolicy(
        AutoScalingRollingUpdate=troposphere.policies.AutoScalingRollingUpdate(
            MaxBatchSize=max_batch_size,
            MinInstancesInService=min_instances_in_service,
            PauseTime=pause_time,
            WaitOnResourceSignals=wait_on_resource_signals))

    self.create_output(title='ASGName',
                       value=troposphere.Ref(asg_res),
                       description='Auto Scaling Group Name',
                       ref=[asg_config_ref, asg_config_ref + '.name'])

    # CPU Scaling Policy: target-tracking on average CPU utilization.
    if asg_config.scaling_policy_cpu_average > 0:
        troposphere.autoscaling.ScalingPolicy(
            title='CPUAverageScalingPolicy',
            template=template,
            AutoScalingGroupName=troposphere.Ref(asg_res),
            PolicyType='TargetTrackingScaling',
            TargetTrackingConfiguration=troposphere.autoscaling.TargetTrackingConfiguration(
                PredefinedMetricSpecification=troposphere.autoscaling.PredefinedMetricSpecification(
                    PredefinedMetricType='ASGAverageCPUUtilization'),
                TargetValue=float(asg_config.scaling_policy_cpu_average)))

    # Simple/step scaling policies, each driven by one or more alarms.
    if asg_config.scaling_policies != None:
        for scaling_policy_name in asg_config.scaling_policies.keys():
            scaling_policy = asg_config.scaling_policies[scaling_policy_name]
            if scaling_policy.is_enabled() == False:
                continue
            scaling_policy_res = troposphere.autoscaling.ScalingPolicy(
                title=self.create_cfn_logical_id_join(
                    ['ScalingPolicy', scaling_policy_name], camel_case=True),
                template=template,
                AdjustmentType=scaling_policy.adjustment_type,
                AutoScalingGroupName=troposphere.Ref(asg_res),
                PolicyType=scaling_policy.policy_type,
                ScalingAdjustment=scaling_policy.scaling_adjustment,
                Cooldown=scaling_policy.cooldown)
            alarm_idx = 0
            for alarm in scaling_policy.alarms:
                dimension_list = []
                for dimension in alarm.dimensions:
                    dimension_value = dimension.value
                    if dimension.name == 'AutoScalingGroupName' and references.is_ref(dimension.value):
                        # Reference the local ASG if the ref points here
                        dimension_ref = Reference(dimension.value)
                        if dimension_ref.ref == self.config_ref:
                            dimension_value = troposphere.Ref(asg_res)
                    dimension_res = troposphere.cloudwatch.MetricDimension(
                        Name=dimension.name, Value=dimension_value)
                    dimension_list.append(dimension_res)
                if len(dimension_list) == 0:
                    # AWS::NoValue omits the Dimensions property entirely.
                    dimension_list = troposphere.Ref('AWS::NoValue')
                # Alarm Resource
                troposphere.cloudwatch.Alarm(
                    title=self.create_cfn_logical_id_join([
                        'ScalingPolicyAlarm', scaling_policy_name, str(alarm_idx)
                    ], camel_case=True),
                    template=template,
                    ActionsEnabled=True,
                    AlarmActions=[troposphere.Ref(scaling_policy_res)],
                    AlarmDescription=alarm.alarm_description,
                    ComparisonOperator=alarm.comparison_operator,
                    MetricName=alarm.metric_name,
                    Namespace=alarm.namespace,
                    Period=alarm.period,
                    Threshold=alarm.threshold,
                    EvaluationPeriods=alarm.evaluation_periods,
                    Statistic=alarm.statistic,
                    Dimensions=dimension_list)
                alarm_idx += 1

    # Lifecycle hooks (e.g. launch/terminate notifications).
    if asg_config.lifecycle_hooks != None:
        for lifecycle_hook_name in asg_config.lifecycle_hooks:
            lifecycle_hook = asg_config.lifecycle_hooks[lifecycle_hook_name]
            if lifecycle_hook.is_enabled() == False:
                continue
            troposphere.autoscaling.LifecycleHook(
                title=self.create_cfn_logical_id_join(
                    ['LifecycleHook', lifecycle_hook_name], camel_case=True),
                template=template,
                AutoScalingGroupName=troposphere.Ref(asg_res),
                DefaultResult=lifecycle_hook.default_result,
                LifecycleTransition=lifecycle_hook.lifecycle_transition,
                RoleARN=lifecycle_hook.role_arn,
                NotificationTargetARN=lifecycle_hook.notification_target_arn)

    self.set_template()