def get_aws_name(self):
    "Name of the IoT Policy in AWS"
    netenv = get_parent_by_interface(self, schemas.INetworkEnvironment)
    environment = get_parent_by_interface(self, schemas.IEnvironment)
    application = get_parent_by_interface(self, schemas.IApplication)
    res_group = get_parent_by_interface(self, schemas.IResourceGroup)
    parts = [
        'ne', netenv.name, environment.name,
        'app', application.name, res_group.name,
        self.name,
    ]
    aws_name = smart_join('-', parts).replace('_', '-').lower()
    # If the generated policy name is > 128 chars, then prefix a hash of the name
    if len(aws_name) > 128:
        name_hash = md5sum(str_data=aws_name)[:8]
        # keep the trailing (128 - 9) characters so hash + '-' + tail fits in 128
        copy_size = -(128 - 9)
        if aws_name[copy_size] != '-':
            name_hash += '-'
        aws_name = name_hash + aws_name[copy_size:]
    return aws_name
def notification_groups(self):
    "A unique list of notification groups that an Alarm is subscribed to"
    # start with any notifications specific to the alarm
    groups = self._add_notifications_to_groups(self.notifications, {})
    # add on notifications for the AlarmSet
    # does not exist for certain Alarms (e.g. Route53HealthCheck Alarm)
    alarm_set = get_parent_by_interface(self, schemas.IAlarmSet)
    if alarm_set is not None:
        groups = self._add_notifications_to_groups(alarm_set.notifications, groups)
    # For Alarms that belong to an application, check IMonitorConfig and IApplication
    # add on notifications for the Resource
    monitor = get_parent_by_interface(self, schemas.IMonitorConfig)
    if monitor is not None:
        groups = self._add_notifications_to_groups(monitor.notifications, groups)
    # add on notifications for the Application
    app = get_parent_by_interface(self, schemas.IApplication)
    if app is not None:
        groups = self._add_notifications_to_groups(app.notifications, groups)
    # dict keys preserve insertion order and de-duplicate group names
    return list(groups)
def get_account(self):
    """Return the Account object that this resource is provisioned to."""
    project = get_parent_by_interface(self, schemas.IProject)
    env_reg = get_parent_by_interface(self, schemas.IEnvironmentRegion)
    # the EnvironmentRegion's network declares which account the resource lives in
    return get_model_obj_from_ref(env_reg.network.aws_account, project)
def prefixed_name(resource, name, legacy_flag=None):
    """Returns a name prefixed to be unique:
    e.g. netenv_name-env_name-app_name-group_name-resource_name-name

    :param resource: model object contained in an Application
    :param name: trailing name segment
    :param legacy_flag: optional callable used to test legacy-naming flags
    """
    str_list = []
    # currently only works for resources in an environment
    # Include the netenv name unless the 2019-10-13 legacy loggroup naming
    # flag is active. (Original code duplicated this branch for the
    # legacy_flag-is-None and flag-is-False cases; merged here.)
    if legacy_flag is None or not legacy_flag('netenv_loggroup_name_2019_10_13'):
        netenv = get_parent_by_interface(resource, schemas.INetworkEnvironment)
        if netenv is not None:
            str_list.append(netenv.name)
    app_name = get_parent_by_interface(resource, schemas.IApplication).name
    group_name = get_parent_by_interface(resource, schemas.IResourceGroup).name
    env = get_parent_by_interface(resource, schemas.IEnvironment)
    # Services do not have an environment
    if env is not None:
        str_list.append(env.name)
    str_list.extend([app_name, group_name, resource.name, name])
    return '-'.join(str_list)
def netenv_name(self):
    "Name of the NetworkEnvironment (or owning Service) this object belongs to; cached."
    if hasattr(self, '_netenv_name'):
        return self._netenv_name
    netenv = get_parent_by_interface(self, schemas.INetworkEnvironment)
    if netenv is None:
        # Service objects are not contained in a NetworkEnvironment
        service = get_parent_by_interface(self, schemas.IService)
        self._netenv_name = service.name
    else:
        self._netenv_name = netenv.name
    return self._netenv_name
def env_obj(self):
    "The Environment (or AccountContainer fallback) this object belongs to; cached."
    if hasattr(self, '_env_obj'):
        return self._env_obj
    env = get_parent_by_interface(self, schemas.IEnvironment)
    if env is None:
        # objects outside a NetEnv fall back to their AccountContainer
        account = get_parent_by_interface(self, schemas.IAccountContainer)
        self._env_obj = account
    else:
        self._env_obj = env
    return self._env_obj
def env_name(self):
    "Name of the Environment (or AccountContainer fallback) this object belongs to; cached."
    if hasattr(self, '_env_name'):
        return self._env_name
    env = get_parent_by_interface(self, schemas.IEnvironment)
    if env is None:
        # objects outside a NetEnv fall back to their AccountContainer's name
        account = get_parent_by_interface(self, schemas.IAccountContainer)
        self._env_name = account.name
    else:
        self._env_name = env.name
    return self._env_name
def env_region_obj(self):
    "The EnvironmentRegion (or RegionContainer fallback) this object belongs to; cached."
    if hasattr(self, '_env_region_obj'):
        return self._env_region_obj
    env_region = get_parent_by_interface(self, schemas.IEnvironmentRegion)
    if env_region is None:
        # objects outside a NetEnv fall back to their RegionContainer
        region_cont = get_parent_by_interface(self, schemas.IRegionContainer)
        self._env_region_obj = region_cont
    else:
        self._env_region_obj = env_region
    return self._env_region_obj
def account_name(self):
    """Name of the AWS account this object belongs to.

    Raises AttributeError when no AccountContainer or EnvironmentRegion
    parent can be found.
    """
    account_cont = get_parent_by_interface(self, schemas.IAccountContainer)
    if account_cont is not None:
        return account_cont.name
    env_region = get_parent_by_interface(self, schemas.IEnvironmentRegion)
    # NOTE(review): called without an explicit interface, unlike sibling code
    # that passes schemas.IProject — presumably IProject is the default; confirm.
    project = get_parent_by_interface(self)
    if env_region is not None:
        return get_model_obj_from_ref(env_region.network.aws_account, project).name
    raise AttributeError('Could not determine account for {}'.format(self.name))
def get_account(self):
    """Return the Account object that this resource is provisioned to."""
    project = get_parent_by_interface(self, schemas.IProject)
    region = get_parent_by_interface(self, schemas.IRegionContainer)
    # NetEnv accounts: the EnvironmentRegion's network declares the account ref
    if schemas.IEnvironmentRegion.providedBy(region):
        return get_model_obj_from_ref(region.network.aws_account, project)
    # Service accounts: look up the account by its container's name
    account_cont = get_parent_by_interface(self, schemas.IAccountContainer)
    return project.accounts[account_cont.name]
def __init__(
    self,
    stack,
    paco_ctx,
    nat_sg_config,
):
    """NAT Gateway template: Managed NAT Gateways or an EC2 NAT instance.

    :param nat_sg_config: SecurityGroup config for the EC2 NAT path; may be None.
    """
    # BUG FIX: nat_sg_config_ref was unbound when nat_sg_config was None,
    # which would raise NameError if the EC2 NAT path was later taken.
    nat_sg_config_ref = None
    if nat_sg_config is not None:
        nat_sg_config_ref = nat_sg_config.paco_ref_parts
    nat_config = stack.resource
    network_config = get_parent_by_interface(nat_config, schemas.INetwork)
    super().__init__(
        stack,
        paco_ctx,
        iam_capabilities=["CAPABILITY_NAMED_IAM"],
    )
    self.set_aws_name('NGW', nat_config.name)

    # empty template if not enabled
    # ToDo: Once the Managed type is Troposphere this code can be simplified
    if not nat_config.is_enabled():
        if nat_config.type == 'Managed':
            self.init_template('NAT Gateways')
        else:
            self.init_template('EC2 NAT Gateway')
        return

    if nat_config.type == 'Managed':
        self.managed_nat_gateway(network_config, nat_config)
    else:
        self.init_template('EC2 NAT Gateway')
        self.ec2_nat_gateway(network_config, nat_sg_config, nat_sg_config_ref, nat_config)
def set_alarm_actions_to_cfn_export(self, alarm, cfn_export_dict):
    """Sets the AlarmActions, OKActions and InsufficientDataActions for a Troposphere dict.

    Raises MissingSNSTopics when no computed SNS topics exist for the
    alarm's account/region.
    """
    alarm_action_list = []
    alarm_parent = get_parent_by_interface(alarm, schemas.IResource)
    alarm_account = alarm_parent.get_account()
    # hoist the deep attribute chain; membership tests work on dicts directly
    sns_computed = self.paco_ctx.project['resource']['sns'].computed
    if alarm_account.name not in sns_computed or \
            alarm.region_name not in sns_computed[alarm_account.name]:
        message = f'Could not find SNS topics for account "{alarm_account.name}" in region "{alarm.region_name}"'
        raise MissingSNSTopics(message)
    notification_groups = sns_computed[alarm_account.name][alarm.region_name]
    for alarm_action in alarm.get_alarm_actions_paco_refs(notification_groups):
        # one Parameter per unique SNS Topic ARN, de-duplicated by md5 of the ref
        param_name = 'AlarmAction{}'.format(utils.md5sum(str_data=alarm_action))
        if param_name in self.alarm_action_param_map:
            alarm_action_param = self.alarm_action_param_map[param_name]
        else:
            alarm_action_param = self.create_cfn_parameter(
                param_type='String',
                name=param_name,
                description='SNSTopic for Alarm to notify.',
                value=alarm_action)
            self.alarm_action_param_map[param_name] = alarm_action_param
        alarm_action_list.append(troposphere.Ref(alarm_action_param))
    cfn_export_dict['AlarmActions'] = alarm_action_list
    if getattr(alarm, 'enable_ok_actions', False):
        cfn_export_dict['OKActions'] = alarm_action_list
    if getattr(alarm, 'enable_insufficient_data_actions', False):
        cfn_export_dict['InsufficientDataActions'] = alarm_action_list
def create_notification_params(self, alarm):
    "Create a Parameter for each SNS Topic an alarm should notify. Return a list of Refs to those Params."
    alarm_parent = get_parent_by_interface(alarm, schemas.IResource)
    alarm_account = alarm_parent.get_account()
    # region choice is loop-invariant: hoisted out of the per-group loop
    if not self.notification_region:
        region = alarm.region_name
    else:
        region = self.notification_region
    notification_paco_refs = [
        self.paco_ctx.project['resource']['sns'].computed[alarm_account.name][region][group].paco_ref + '.arn'
        for group in alarm.notification_groups
    ]
    notification_cfn_refs = []
    for notification_paco_ref in notification_paco_refs:
        # one Parameter per unique SNS Topic ref, de-duplicated by md5
        param_name = 'Notification{}'.format(utils.md5sum(str_data=notification_paco_ref))
        if param_name in self.notification_param_map:
            notification_param = self.notification_param_map[param_name]
        else:
            notification_param = self.create_cfn_parameter(
                param_type='String',
                name=param_name,
                description='SNS Topic to notify',
                value=notification_paco_ref,
                min_length=1,  # prevent borked empty values from breaking notification
            )
            self.notification_param_map[param_name] = notification_param
        notification_cfn_refs.append(troposphere.Ref(notification_param))
    return notification_cfn_refs
def get_full_log_group_name(self):
    "Full LogGroup name, prefixed with the CloudWatchLogSet name when it belongs to one"
    # external resources use their configured log_group_name unchanged
    if self.external_resource:
        return self.log_group_name
    name = self.get_log_group_name()
    parent = get_parent_by_interface(self, schemas.ICloudWatchLogSet)
    if parent is not None:
        return parent.name + '-' + name
    return name
def set_command(self, resource):
    """Set a cloud resource or property.

    Raises UnknownSetCommand when the resource type is not supported.
    """
    secrets_manager = get_parent_by_interface(resource, schemas.ISecretsManager)
    if secrets_manager is not None:
        account_ctx = self.paco_ctx.get_account_context(account_ref=self.env_region.network.aws_account)
        secret_name = resource.paco_ref_parts
        self.secrets_manager(secret_name, account_ctx, self.env_region.name)
        # BUG FIX: without this return the success path fell through and
        # raised UnknownSetCommand even after handling the resource.
        return
    raise UnknownSetCommand(f"Unable to apply set command for resource of type '{resource.__class__.__name__}'\nObject: {resource.paco_ref_parts}")
def get_resource(self):
    "Resolve the dotted resource_name path through nested child_resources."
    node = get_parent_by_interface(self, schemas.IApiGatewayRestApi).resources
    resource = None
    for segment in self.resource_name.split('.'):
        resource = node[segment]
        node = resource.child_resources
    return resource
def resource_group_name(self):
    """The Resource Group name or None.

    Only Application resources have a Resource Group name,
    e.g. BackupVault Resource does not.
    """
    resource_group = get_parent_by_interface(self.stack.resource, schemas.IResourceGroup)
    if resource_group is not None:
        return resource_group.name
    return None
def get_account(self, project, resource):
    "Account object this reference belongs to"
    if self.type == 'service':
        # service refs carry the account name as the third ref part
        return project.accounts[self.parts[2]]
    if self.type == 'netenv':
        # netenv refs resolve via the resource's EnvironmentRegion network
        env_reg = get_parent_by_interface(resource, schemas.IEnvironmentRegion)
        return get_model_obj_from_ref(env_reg.network.aws_account, project)
    return None
def get_alarm_description(self, notification_cfn_refs):
    """Create an Alarm Description in JSON format with Paco Alarm information"""
    project = get_parent_by_interface(self, schemas.IProject)
    netenv = get_parent_by_interface(self, schemas.INetworkEnvironment)
    env = get_parent_by_interface(self, schemas.IEnvironment)
    envreg = get_parent_by_interface(self, schemas.IEnvironmentRegion)
    app = get_parent_by_interface(self, schemas.IApplication)
    group = get_parent_by_interface(self, schemas.IResourceGroup)
    resource = get_parent_by_interface(self, schemas.IResource)

    # SNS Topic ARNs are supplied as Parameter Refs
    topic_arn_subs = []
    sub_dict = {}
    for action_ref in notification_cfn_refs:
        ref_id = action_ref.data['Ref']
        topic_arn_subs.append('${%s}' % ref_id)
        sub_dict[ref_id] = action_ref

    # Base alarm info - used for standalone alarms not part of an application
    description = {
        "project_name": project.name,
        "project_title": project.title,
        "account_name": self.account_name,
        "alarm_name": self.name,
        "classification": self.classification,
        "severity": self.severity,
        "topic_arns": topic_arn_subs,
    }
    # conditional fields:
    if self.description:
        description['description'] = self.description
    if self.runbook_url:
        description['runbook_url'] = self.runbook_url
    if app is not None:
        # Service applications and apps not part of a NetEnv
        description["app_name"] = app.name
        description["app_title"] = app.title
    if group is not None:
        # Application level Alarms do not have resource group and resource
        description["resource_group_name"] = group.name
        description["resource_group_title"] = group.title
        description["resource_name"] = resource.name
        description["resource_title"] = resource.title
    if netenv is not None:
        # NetEnv information
        description["netenv_name"] = netenv.name
        description["netenv_title"] = netenv.title
        description["env_name"] = env.name
        description["env_title"] = env.title
        description["envreg_name"] = envreg.name
        description["envreg_title"] = envreg.title
    description_json = json.dumps(description)
    # BUG FIX: the Sub(...) expression was detached from its `return`
    # statement, making the function return None.
    return troposphere.Sub(description_json, sub_dict)
def get_accounts(self):
    """
    Resolve the locations field for all accounts.
    If locations is empty, then all accounts are returned.
    """
    # BUG FIX: `project` was referenced before assignment when
    # self.locations was empty, raising NameError.
    project = get_parent_by_interface(self, schemas.IProject)
    if self.locations == []:
        return project['accounts'].values()
    accounts = []
    for location in self.locations:
        account = references.get_model_obj_from_ref(location.account, project)
        accounts.append(account)
    return accounts
def get_accounts(self):
    """
    Resolve the CloudTrail.accounts field to a list of IAccount objects from the model.
    If the field is empty, then all accounts are returned.
    """
    project = get_parent_by_interface(self, schemas.IProject)
    if self.accounts == []:
        # no explicit accounts configured: every account in the project applies
        accounts = project['accounts'].values()
    else:
        accounts = [
            references.get_model_obj_from_ref(account_ref, project)
            for account_ref in self.accounts
        ]
    return accounts
def region_name(self):
    """AWS region name this object is deployed in.

    Raises AttributeError if no region can be determined.
    """
    # Allow an object to override its region_name:
    # for example, Route53HealthCheck hard-codes Metrics to us-east-1
    if hasattr(self, 'overrode_region_name'):
        return self.overrode_region_name
    # region the resource is deployed in
    region = get_parent_by_interface(self, schemas.IRegionContainer)
    if region is not None:
        return region.name
    # Global buckets have a region field
    if schemas.IS3Bucket.providedBy(self):
        return self.region
    raise AttributeError('Could not determine region for {}'.format(self.name))
def get_aws_name(self):
    """Name of the IoT Policy in AWS.

    Raises InvalidModelObject if no NetworkEnvironment or Service parent exists.
    """
    # NetworkEnvironment or Service name
    name_list = []
    ne = get_parent_by_interface(self, schemas.INetworkEnvironment)
    if ne is None:
        service = get_parent_by_interface(self, schemas.IService)
        if service is None:
            raise InvalidModelObject("""Unable to find an INetworkEnvironment or IService model object.""")
        name_list.append('Service')
        name_list.append(service.name)
    else:
        name_list.append('ne')
        name_list.append(ne.name)
    # Environment name or blank if one does not exist
    env = get_parent_by_interface(self, schemas.IEnvironment)
    if env is not None:
        name_list.append(env.name)
    name_list.append('app')
    app = get_parent_by_interface(self, schemas.IApplication)
    name_list.append(app.name)
    group = get_parent_by_interface(self, schemas.IResourceGroup)
    name_list.append(group.name)
    name_list.append(self.name)
    aws_name = smart_join('-', name_list)
    aws_name = aws_name.replace('_', '-').lower()
    # If the generated policy name is > 128 chars, then prefix a hash of the name
    if len(aws_name) > 128:
        name_hash = md5sum(str_data=aws_name)[:8]
        # keep the trailing (128 - 9) characters so hash + '-' + tail fits in 128
        copy_size = -(128 - 9)
        if aws_name[copy_size] != '-':
            name_hash += '-'
        aws_name = name_hash + aws_name[copy_size:]
    return aws_name
def create_service_role(self):
    "Service role for ECS to assume"
    # Role is named per-NetworkEnvironment so each netenv gets its own ECSService role
    netenv = get_parent_by_interface(self.resource, schemas.INetworkEnvironment)
    iam_role_id = self.gen_iam_role_id(netenv.name, 'ECSService')
    role = paco.models.iam.Role(iam_role_id, self.resource)
    # Permissions ECS needs to (de)register instances/targets with load
    # balancers plus read-only Describe access; no resource-level scoping ('*').
    statements = [
        {
            'effect': 'Allow',
            'action': [
                'elasticloadbalancing:DeregisterInstancesFromLoadBalancer',
                'elasticloadbalancing:DeregisterTargets',
                'elasticloadbalancing:Describe*',
                'elasticloadbalancing:RegisterInstancesWithLoadBalancer',
                'elasticloadbalancing:RegisterTargets',
                'ec2:Describe*',
                'ec2:AuthorizeSecurityGroupIngress',
            ],
            'resource': ['*'],
        },
    ]
    # assumable only by the ECS service principal
    role.apply_config({
        'enabled': True,
        'path': '/',
        'role_name': iam_role_id,
        'assume_role_policy': {
            'effect': 'Allow',
            'service': ['ecs.amazonaws.com']
        },
        'policies': [{
            'name': 'ECS-Service',
            'statement': statements
        }],
    })
    # hand the role to the IAM controller to provision it in this stack group
    iam_ctl = self.paco_ctx.get_controller('IAM')
    iam_ctl.add_role(
        region=self.aws_region,
        resource=self.resource,
        role=role,
        iam_role_id=iam_role_id,
        stack_group=self.stack_group,
        stack_tags=self.stack_tags,
    )
    return role
def add_bucket(self, bucket, bucket_name_prefix=None, bucket_name_suffix=None, stack_hooks=None, change_protected=False):
    """Add a bucket: will create a stack and stack hooks as needed.

    Raises PacoBucketExists if a bucket was already added to this context.
    """
    if self.bucket_context['config'] is not None:
        raise PacoBucketExists("Bucket already exists: %s" % (self.resource_ref))
    bucket.bucket_name_prefix = bucket_name_prefix
    bucket.bucket_name_suffix = bucket_name_suffix
    res_group = get_parent_by_interface(bucket, schemas.IResourceGroup)
    if res_group is not None:
        self.bucket_context['group_id'] = res_group.name
    self.bucket_context['config'] = bucket
    self.bucket_context['ref'] = self.resource_ref
    bucket.resolve_ref_obj = self
    if bucket.external_resource:
        # if the bucket already exists, do not create a stack for it
        self.paco_ctx.log_action_col(
            "Init", "S3", "External",
            bucket.name + ": " + bucket.get_bucket_name(),
            False, bucket.is_enabled())
    else:
        self.paco_ctx.log_action_col(
            "Init", "S3", "Bucket",
            bucket.name + ": " + bucket.get_bucket_name(),
            False, bucket.is_enabled())
        if not change_protected:
            if stack_hooks is None:
                stack_hooks = StackHooks(self.paco_ctx)
            # S3 Delete on Stack Delete hook
            stack_hooks.add(
                'S3StackGroup', 'delete', 'pre',
                self.stack_hook_pre_delete, None, self.bucket_context)
            self.add_stack(
                bucket_policy_only=False,
                stack_hooks=stack_hooks,
                stack_tags=self.stack_tags,
                change_protected=change_protected)
def get_alarm_actions_paco_refs(self, snstopics=None):
    """Return a list of alarm actions in the form of paco.ref SNS Topic ARNs,
    e.g. 'paco.ref service.notification.applications.notification.groups.lambda.resources.snstopic.arn'

    This will by default be a list of SNS Topics that the alarm is subscribed to.
    However, if a plugin is registered, it will provide the actions instead.
    """
    # default to the project's global snstopics when none are supplied
    if not snstopics:
        project = get_parent_by_interface(self, schemas.IProject)
        snstopics = project['resource']['snstopics']

    # if a service plugin provides override_alarm_actions, call that instead
    service_plugins = paco.models.services.list_service_plugins()

    # Error if more than one plugin provides override_alarm_actions
    count = 0
    for plugin_module in service_plugins.values():
        if hasattr(plugin_module, 'override_alarm_actions'):
            count += 1
    if count > 1:
        raise paco.models.exceptions.InvalidPacoProjectFile('More than one Service plugin is overriding alarm actions')
    for plugin_name, plugin_module in service_plugins.items():
        if hasattr(plugin_module, 'override_alarm_actions'):
            return plugin_module.override_alarm_actions(None, self)

    # default behaviour is to use notification groups directly
    notification_arns = [
        snstopics[group].paco_ref + '.arn'
        for group in self.notification_groups
    ]
    # CloudWatch Alarms accept at most 5 actions
    if len(notification_arns) > 5:
        raise paco.models.exceptions.InvalidPacoProjectFile("""
Alarm {} has {} actions, but CloudWatch Alarms allow a maximum of 5 actions.

{}""".format(self.name, len(notification_arns), get_formatted_model_context(self)))
    return notification_arns
def create_iam_role(self):
    """Backup service Role.

    Returns the Role, or None when no vault is enabled.
    """
    # if at least one vault is enabled, create an IAM Role
    # BackupVault will create one IAM Role for each NetworkEnvironment/Environment combination,
    # this way a netenv/env can be created, have its own Role, then a different netenv/env with a second Role
    # if the first netenv/env is deleted, the second one will not be impacted.
    if not any(vault.is_enabled() for vault in self.config.values()):
        return None
    netenv = get_parent_by_interface(self.config, schemas.INetworkEnvironment)
    iam_role_id = 'Backup-{}-{}'.format(netenv.name, self.env_ctx.env.name)
    # AWS-managed policies for backup and restore operations
    policy_arns = [
        'arn:aws:iam::aws:policy/service-role/AWSBackupServiceRolePolicyForBackup',
        'arn:aws:iam::aws:policy/service-role/AWSBackupServiceRolePolicyForRestores'
    ]
    role_dict = {
        'enabled': True,
        'path': '/',
        'role_name': iam_role_id,
        'managed_policy_arns': policy_arns,
        'assume_role_policy': {
            'effect': 'Allow',
            'service': ['backup.amazonaws.com']
        }
    }
    role = paco.models.iam.Role(iam_role_id, self.config)
    role.apply_config(role_dict)
    iam_ctl = self.paco_ctx.get_controller('IAM')
    iam_ctl.add_role(
        region=self.env_ctx.region,
        resource=self.config,
        role=role,
        iam_role_id=iam_role_id,
        stack_group=self,
        stack_tags=StackTags(self.stack_tags))
    return role
def gen_state_config(self, source):
    """Build the notification state dict for an EventRule source.

    Returns a dict with the source type and, when applicable, per-group
    notification severity/groups/slack_channels.
    """
    monitoring = self.resource.monitoring
    # initialized up-front so `notifications` is always bound even when
    # monitoring is absent or disabled
    notifications = None
    if monitoring is not None and monitoring.is_enabled():
        if monitoring.notifications is not None and len(monitoring.notifications.keys()) > 0:
            # resource-level notifications take precedence
            notifications = monitoring.notifications
        else:
            # fall back to the Application's notifications
            app_config = get_parent_by_interface(self.resource, schemas.IApplication)
            notifications = app_config.notifications
    source_obj = get_model_obj_from_ref(source, self.paco_ctx.project)
    state_config = {'type': source_obj.type, 'notifications': []}
    if notifications is None or len(notifications.keys()) <= 0:
        return state_config
    # Store the Notification state for this EventRule
    if self.resource.event_pattern is None:
        return state_config
    if source_obj.type == 'CodeBuild.Build':
        state_config['project_name'] = source_obj._stack.template.get_project_name()
    state_config['notifications'] = {}
    for group_id in notifications.keys():
        notify_group = notifications[group_id]
        group_state = {
            'severity': notify_group.severity,
            'groups': list(notify_group.groups),
            'slack_channels': list(notify_group.slack_channels),
        }
        state_config['notifications'][group_id] = group_state
    return state_config
def get_formatted_model_context(obj):
    """Return a formatted string describing a model object in its context"""
    # ToDo: should work for all NetEnv objects, will need expanding to
    # handle Services and other objects
    try:
        netenv = get_parent_by_interface(obj, schemas.INetworkEnvironment)
        env = get_parent_by_interface(obj, schemas.IEnvironment)
        envreg = get_parent_by_interface(obj, schemas.IEnvironmentRegion)
        app = get_parent_by_interface(obj, schemas.IApplication)
        group = get_parent_by_interface(obj, schemas.IResourceGroup)
    except AttributeError:
        return 'Obj has no parent in model'
    # If the obj is only IParent, walk up to the first INamed for context
    named_obj = get_parent_by_interface(obj, schemas.INamed)
    out = ""
    if named_obj is not None:
        out = "Paco reference: {}\n".format(named_obj.paco_ref)
    if netenv is not None:
        if netenv.title:
            out += "Network Environment: {} ({})\n".format(netenv.title, netenv.name)
        else:
            out += "Network Environment: " + netenv.name + "\n"
    if env is not None:
        if env.title:
            out += "Environment: {} ({})\n".format(env.title, env.name)
        else:
            out += "Environment: " + env.name + "\n"
    if envreg is not None:
        out += "Account: " + envreg.network.aws_account + "\n"
        if envreg.title:
            out += "Region: {} ({})\n".format(envreg.title, envreg.name)
        else:
            out += "Region: " + envreg.name + "\n"
    if app is not None:
        if app.title:
            out += "Application: {} ({})\n".format(app.title, app.name)
        else:
            out += "Application: " + app.name + "\n"
    if group is not None:
        if group.title:
            # BUG FIX: user-facing label was misspelled "Resrouce Group"
            out += "Resource Group: {} ({})\n".format(group.title, group.name)
        else:
            out += "Resource Group: " + group.name + "\n"
    return out
def __init__(
    self,
    stack,
    paco_ctx,
):
    """Build the CloudFormation template for a Lambda Function resource.

    Creates CFN Parameters, the AWS::Lambda::Function resource (with
    optional VPC config, SimpleDB cache domain, and environment
    variables), Lambda Permissions and subscriptions for SNS Topics,
    Permissions for S3 Bucket notifications, EventRules and IoT
    Analytics pipelines that target this function, CloudWatch Log
    Groups plus an IAM ManagedPolicy granting the execution role log
    access, and stack Outputs for the function name and ARN.
    """
    super().__init__(
        stack,
        paco_ctx,
        iam_capabilities=["CAPABILITY_NAMED_IAM"],
    )
    account_ctx = stack.account_ctx
    aws_region = stack.aws_region
    self.set_aws_name('Lambda', self.resource_group_name, self.resource_name)
    awslambda = self.awslambda = self.stack.resource
    self.init_template('Lambda Function')

    # if not enabled finish with only empty placeholder
    if not awslambda.is_enabled():
        return

    # Parameters
    sdb_cache_param = self.create_cfn_parameter(
        name='EnableSDBCache',
        param_type='String',
        description='Boolean indicating whether an SDB Domain will be created to be used as a cache.',
        value=awslambda.sdb_cache
    )
    function_description_param = self.create_cfn_parameter(
        name='FunctionDescription',
        param_type='String',
        # NOTE(review): "Lamdba" is a typo in this Parameter description
        # (CFN metadata only, not user logic) — left as-is here.
        description='A description of the Lamdba Function.',
        value=awslambda.description
    )
    handler_param = self.create_cfn_parameter(
        name='Handler',
        param_type='String',
        description='The name of the function to call upon execution.',
        value=awslambda.handler
    )
    runtime_param = self.create_cfn_parameter(
        name='Runtime',
        param_type='String',
        description='The name of the runtime language.',
        value=awslambda.runtime
    )
    role_arn_param = self.create_cfn_parameter(
        name='RoleArn',
        param_type='String',
        description='The execution role for the Lambda Function.',
        value=awslambda.iam_role.get_arn()
    )
    role_name_param = self.create_cfn_parameter(
        name='RoleName',
        param_type='String',
        description='The execution role name for the Lambda Function.',
        value=awslambda.iam_role.resolve_ref_obj.role_name
    )
    memory_size_param = self.create_cfn_parameter(
        name='MemorySize',
        param_type='Number',
        description="The amount of memory that your function has access to. Increasing the function's" + \
            " memory also increases its CPU allocation. The default value is 128 MB. The value must be a multiple of 64 MB.",
        value=awslambda.memory_size
    )
    reserved_conc_exec_param = self.create_cfn_parameter(
        name='ReservedConcurrentExecutions',
        param_type='Number',
        description='The number of simultaneous executions to reserve for the function.',
        value=awslambda.reserved_concurrent_executions
    )
    timeout_param = self.create_cfn_parameter(
        name='Timeout',
        param_type='Number',
        description='The amount of time that Lambda allows a function to run before stopping it. ',
        value=awslambda.timeout
    )
    layers_param = self.create_cfn_parameter(
        name='Layers',
        param_type='CommaDelimitedList',
        description='List of up to 5 Lambda Layer ARNs.',
        value=','.join(awslambda.layers)
    )

    # create the Lambda resource
    cfn_export_dict = {
        'Description': troposphere.Ref(function_description_param),
        'Handler': troposphere.Ref(handler_param),
        'MemorySize': troposphere.Ref(memory_size_param),
        'Runtime': troposphere.Ref(runtime_param),
        'Role': troposphere.Ref(role_arn_param),
        'Timeout': troposphere.Ref(timeout_param),
    }
    if awslambda.reserved_concurrent_executions:
        # NOTE(review): the trailing comma makes this value a 1-tuple
        # (Ref(...),) rather than a bare Ref — confirm this serializes
        # correctly for a Number property.
        cfn_export_dict['ReservedConcurrentExecutions'] = troposphere.Ref(reserved_conc_exec_param),
    if len(awslambda.layers) > 0:
        # NOTE(review): trailing comma — value is a 1-tuple of a Ref;
        # presumably serialized as a list for the Layers property — confirm.
        cfn_export_dict['Layers'] = troposphere.Ref(layers_param),

    # Lambda VPC
    if awslambda.vpc_config != None:
        vpc_security_group = self.create_cfn_ref_list_param(
            name='VpcSecurityGroupIdList',
            param_type='List<AWS::EC2::SecurityGroup::Id>',
            description='VPC Security Group Id List',
            value=awslambda.vpc_config.security_groups,
            ref_attribute='id',
        )
        # Segment SubnetList is a Segment stack Output based on availability zones
        segment_ref = awslambda.vpc_config.segments[0] + '.subnet_id_list'
        subnet_list_param = self.create_cfn_parameter(
            name='VpcSubnetIdList',
            param_type='List<AWS::EC2::Subnet::Id>',
            description='VPC Subnet Id List',
            value=segment_ref
        )
        cfn_export_dict['VpcConfig'] = {
            'SecurityGroupIds': troposphere.Ref(vpc_security_group),
            'SubnetIds': troposphere.Ref(subnet_list_param),
        }

    # Code object: S3 Bucket, inline ZipFile or deploy artifact?
    if awslambda.code.s3_bucket:
        # explicit S3 bucket: resolve a paco.ref to the bucket's name output
        if awslambda.code.s3_bucket.startswith('paco.ref '):
            value = awslambda.code.s3_bucket + ".name"
        else:
            value = awslambda.code.s3_bucket
        s3bucket_param = self.create_cfn_parameter(
            name='CodeS3Bucket',
            description="An Amazon S3 bucket in the same AWS Region as your function. The bucket can be in a different AWS account.",
            param_type='String',
            value=value
        )
        s3key_param = self.create_cfn_parameter(
            name='CodeS3Key',
            description="The Amazon S3 key of the deployment package.",
            param_type='String',
            value=awslambda.code.s3_key
        )
        cfn_export_dict['Code'] = {
            'S3Bucket': troposphere.Ref(s3bucket_param),
            'S3Key': troposphere.Ref(s3key_param),
        }
    else:
        zip_path = Path(awslambda.code.zipfile)
        if zip_path.is_file():
            # a single file is embedded inline in the template
            cfn_export_dict['Code'] = {
                'ZipFile': zip_path.read_text()
            }
        elif zip_path.is_dir():
            # get S3Bucket/S3Key or if it does not exist, it will create the bucket and artifact
            # and then upload the artifact
            bucket_name, artifact_name = init_lambda_code(
                self.paco_ctx.paco_buckets,
                self.stack.resource,
                awslambda.code.zipfile,
                self.stack.account_ctx,
                self.stack.aws_region,
            )
            s3bucket_param = self.create_cfn_parameter(
                name='CodeS3Bucket',
                description="The Paco S3 Bucket for configuration",
                param_type='String',
                value=bucket_name
            )
            s3key_param = self.create_cfn_parameter(
                name='CodeS3Key',
                description="The Lambda code artifact S3 Key.",
                param_type='String',
                value=artifact_name
            )
            cfn_export_dict['Code'] = {
                'S3Bucket': troposphere.Ref(s3bucket_param),
                'S3Key': troposphere.Ref(s3key_param),
            }

    # Environment variables
    var_export = {}
    if awslambda.environment != None and awslambda.environment.variables != None:
        for var in awslambda.environment.variables:
            # strip underscores: CFN Parameter logical names must be alphanumeric
            name = var.key.replace('_','')
            env_param = self.create_cfn_parameter(
                name='EnvVar{}'.format(name),
                param_type='String',
                description='Env var for {}'.format(name),
                value=var.value,
            )
            var_export[var.key] = troposphere.Ref(env_param)
    if awslambda.sdb_cache == True:
        # expose the SDB Domain name (resource created below) to the function
        var_export['SDB_CACHE_DOMAIN'] = troposphere.Ref('LambdaSDBCacheDomain')
    if len(awslambda.log_group_names) > 0:
        # Add PACO_LOG_GROUPS Environment Variable
        paco_log_groups = [
            prefixed_name(awslambda, loggroup_name, self.paco_ctx.legacy_flag)
            for loggroup_name in awslambda.log_group_names
        ]
        paco_log_groups_param = self.create_cfn_parameter(
            name='EnvVariablePacoLogGroups',
            param_type='String',
            description='Env var for Paco Log Groups',
            value=','.join(paco_log_groups),
        )
        var_export['PACO_LOG_GROUPS'] = troposphere.Ref(paco_log_groups_param)
    cfn_export_dict['Environment'] = { 'Variables': var_export }

    # Lambda resource
    self.awslambda_resource = troposphere.awslambda.Function.from_dict(
        'Function',
        cfn_export_dict
    )
    self.template.add_resource(self.awslambda_resource)

    # SDB Cache with SDB Domain and SDB Domain Policy resources
    if awslambda.sdb_cache == True:
        sdb_domain_resource = troposphere.sdb.Domain(
            title='LambdaSDBCacheDomain',
            template=self.template,
            Description="Lambda Function Domain"
        )
        sdb_policy = troposphere.iam.Policy(
            title='LambdaSDBCacheDomainPolicy',
            template=self.template,
            PolicyName='SDBDomain',
            PolicyDocument=Policy(
                Version='2012-10-17',
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[Action("sdb","*")],
                        Resource=[
                            troposphere.Sub(
                                'arn:aws:sdb:${AWS::Region}:${AWS::AccountId}:domain/${DomainName}',
                                DomainName=troposphere.Ref('LambdaSDBCacheDomain')
                            )
                        ],
                    )
                ],
                # NOTE(review): Roles for an IAM Policy is normally a list of
                # role *names*; this passes a single Ref of the Role *ARN*
                # parameter — confirm against the RoleName param used elsewhere.
                Roles=troposphere.Ref(role_arn_param)
            )
        )
        # explicit ordering: domain must exist before policy and function
        sdb_policy.DependsOn = sdb_domain_resource
        self.awslambda_resource.DependsOn = sdb_domain_resource

    # Permissions
    # SNS Topic Lambda permissions and subscription
    idx = 1
    for sns_topic_ref in awslambda.sns_topics:
        # SNS Topic Arn parameters
        param_name = 'SNSTopicArn%d' % idx
        self.create_cfn_parameter(
            name=param_name,
            param_type='String',
            description='An SNS Topic ARN to grant permission to.',
            value=sns_topic_ref + '.arn'
        )
        # Lambda permission: allow the topic to invoke this function
        troposphere.awslambda.Permission(
            title=param_name + 'Permission',
            template=self.template,
            Action="lambda:InvokeFunction",
            FunctionName=troposphere.GetAtt(self.awslambda_resource, 'Arn'),
            Principal='sns.amazonaws.com',
            SourceArn=troposphere.Ref(param_name),
        )
        # SNS Topic subscription
        sns_topic = get_model_obj_from_ref(sns_topic_ref, self.paco_ctx.project)
        troposphere.sns.SubscriptionResource(
            title=param_name + 'Subscription',
            template=self.template,
            Endpoint=troposphere.GetAtt(self.awslambda_resource, 'Arn'),
            Protocol='lambda',
            TopicArn=troposphere.Ref(param_name),
            Region=sns_topic.region_name
        )
        idx += 1

    # Lambda permissions for connected Paco resources
    # walk every node of the parent Application looking for resources that
    # invoke this function
    app = get_parent_by_interface(awslambda, schemas.IApplication)
    for obj in get_all_nodes(app):
        # S3 Bucket notification permission(s)
        if schemas.IS3Bucket.providedBy(obj):
            seen = {}
            if hasattr(obj, 'notifications'):
                if hasattr(obj.notifications, 'lambdas'):
                    for lambda_notif in obj.notifications.lambdas:
                        if lambda_notif.function == awslambda.paco_ref:
                            # yes, this Lambda gets notification from this S3Bucket
                            group = get_parent_by_interface(obj, schemas.IResourceGroup)
                            s3_logical_name = self.gen_cf_logical_name(group.name + obj.name, '_')
                            # `seen` de-duplicates Permissions per bucket
                            if s3_logical_name not in seen:
                                troposphere.awslambda.Permission(
                                    title='S3Bucket' + s3_logical_name,
                                    template=self.template,
                                    Action="lambda:InvokeFunction",
                                    FunctionName=troposphere.GetAtt(self.awslambda_resource, 'Arn'),
                                    Principal='s3.amazonaws.com',
                                    SourceArn='arn:aws:s3:::' + obj.get_bucket_name(),
                                )
                                seen[s3_logical_name] = True

        # Events Rule permission(s)
        if schemas.IEventsRule.providedBy(obj):
            seen = {}
            for target in obj.targets:
                # normalize both refs to the same account/region before comparing
                target_ref = Reference(target.target)
                target_ref.set_account_name(account_ctx.get_name())
                target_ref.set_region(aws_region)
                lambda_ref = Reference(awslambda.paco_ref)
                if target_ref.raw == lambda_ref.raw:
                    # yes, the Events Rule has a Target that is this Lambda
                    group = get_parent_by_interface(obj, schemas.IResourceGroup)
                    eventsrule_logical_name = self.gen_cf_logical_name(group.name + obj.name, '_')
                    if eventsrule_logical_name not in seen:
                        rule_name = create_event_rule_name(obj)
                        # rule_name = self.create_cfn_logical_id("EventsRule" + obj.paco_ref)
                        # rule_name = hash_smaller(rule_name, 64)
                        source_arn = 'arn:aws:events:{}:{}:rule/{}'.format(
                            aws_region,
                            account_ctx.id,
                            rule_name
                        )
                        troposphere.awslambda.Permission(
                            title='EventsRule' + eventsrule_logical_name,
                            template=self.template,
                            Action="lambda:InvokeFunction",
                            FunctionName=troposphere.GetAtt(self.awslambda_resource, 'Arn'),
                            Principal='events.amazonaws.com',
                            SourceArn=source_arn,
                        )
                        seen[eventsrule_logical_name] = True

        # IoT Analytics permission(s)
        if schemas.IIoTAnalyticsPipeline.providedBy(obj):
            seen = {}
            for activity in obj.pipeline_activities.values():
                if activity.activity_type == 'lambda':
                    target_ref = Reference(activity.function)
                    target_ref.set_account_name(account_ctx.get_name())
                    target_ref.set_region(aws_region)
                    lambda_ref = Reference(awslambda.paco_ref)
                    if target_ref.raw == lambda_ref.raw:
                        # yes, the IoT Analytics Lambda Activity has a ref to this Lambda
                        group = get_parent_by_interface(obj, schemas.IResourceGroup)
                        iotap_logical_name = self.gen_cf_logical_name(group.name + obj.name, '_')
                        if iotap_logical_name not in seen:
                            # NOTE(review): rule_name is computed here but not
                            # used by the Permission below (no SourceArn) —
                            # possibly leftover from the EventsRule branch.
                            rule_name = create_event_rule_name(obj)
                            troposphere.awslambda.Permission(
                                title='IoTAnalyticsPipeline' + iotap_logical_name,
                                template=self.template,
                                Action="lambda:InvokeFunction",
                                FunctionName=troposphere.GetAtt(self.awslambda_resource, 'Arn'),
                                Principal='iotanalytics.amazonaws.com',
                            )
                            seen[iotap_logical_name] = True

    # Log group(s)
    # derive '/aws/lambda/<function name>' from the function ARN
    # (7th ':'-separated field) at deploy time
    loggroup_function_name = troposphere.Join(
        '', [
            '/aws/lambda/',
            troposphere.Select(
                6, troposphere.Split(':', troposphere.GetAtt(self.awslambda_resource, 'Arn'))
            )
        ]
    )
    loggroup_resources = []
    loggroup_resources.append(
        self.add_log_group(loggroup_function_name, 'lambda')
    )
    if len(awslambda.log_group_names) > 0:
        # Additional App-specific LogGroups
        for loggroup_name in awslambda.log_group_names:
            # Add LogGroup to the template
            prefixed_loggroup_name = prefixed_name(awslambda, loggroup_name, self.paco_ctx.legacy_flag)
            loggroup_resources.append(
                self.add_log_group(prefixed_loggroup_name)
            )

    # LogGroup permissions
    log_group_arns = [
        troposphere.Join(':', [
            f'arn:aws:logs:{self.aws_region}:{account_ctx.id}:log-group',
            loggroup_function_name,
            '*'
        ])
    ]
    log_stream_arns = [
        troposphere.Join(':', [
            f'arn:aws:logs:{self.aws_region}:{account_ctx.id}:log-group',
            loggroup_function_name,
            'log-stream',
            '*'
        ])
    ]
    for loggroup_name in awslambda.log_group_names:
        prefixed_loggroup_name = prefixed_name(awslambda, loggroup_name, self.paco_ctx.legacy_flag)
        log_group_arns.append(
            f'arn:aws:logs:{self.aws_region}:{account_ctx.id}:log-group:{prefixed_loggroup_name}:*'
        )
        log_stream_arns.append(
            f'arn:aws:logs:{self.aws_region}:{account_ctx.id}:log-group:{prefixed_loggroup_name}:log-stream:*'
        )
    # ManagedPolicy attached to the execution role granting stream
    # create/describe on the log groups and PutLogEvents on their streams
    loggroup_policy_resource = troposphere.iam.ManagedPolicy(
        title='LogGroupManagedPolicy',
        PolicyDocument=Policy(
            Version='2012-10-17',
            Statement=[
                Statement(
                    Sid='AllowLambdaModifyLogStreams',
                    Effect=Allow,
                    Action=[
                        Action("logs","CreateLogStream"),
                        Action("logs","DescribeLogStreams"),
                    ],
                    Resource=log_group_arns,
                ),
                Statement(
                    Sid='AllowLambdaPutLogEvents',
                    Effect=Allow,
                    Action=[
                        Action("logs","PutLogEvents"),
                    ],
                    Resource=log_stream_arns,
                ),
            ],
        ),
        Roles=[troposphere.Ref(role_name_param)],
    )
    loggroup_policy_resource.DependsOn = loggroup_resources
    self.template.add_resource(loggroup_policy_resource)

    # Outputs
    self.create_output(
        title='FunctionName',
        value=troposphere.Ref(self.awslambda_resource),
        ref=awslambda.paco_ref_parts + '.name',
    )
    self.create_output(
        title='FunctionArn',
        value=troposphere.GetAtt(self.awslambda_resource, 'Arn'),
        ref=awslambda.paco_ref_parts + '.arn',
    )