class SamSimpleTable(SamResourceMacro):
    """SAM simple table macro.

    Expands ``AWS::Serverless::SimpleTable`` into an ``AWS::DynamoDB::Table``
    with a single hash key.
    """
    resource_type = 'AWS::Serverless::SimpleTable'
    property_types = {
        'PrimaryKey': PropertyType(False, dict_of(is_str(), is_str())),
        'ProvisionedThroughput': PropertyType(False, dict_of(is_str(), one_of(is_type(int), is_type(dict)))),
        'TableName': PropertyType(False, one_of(is_str(), is_type(dict))),
        'Tags': PropertyType(False, is_type(dict)),
        'SSESpecification': PropertyType(False, is_type(dict))
    }
    # Maps SAM's friendly attribute type names to DynamoDB's one-letter codes.
    attribute_type_conversions = {
        'String': 'S',
        'Number': 'N',
        'Binary': 'B'
    }

    def to_cloudformation(self, **kwargs):
        """Returns the DynamoDB table to which this SAM SimpleTable corresponds.

        :returns: a list containing the generated AWS::DynamoDB::Table resource
        :rtype: list
        """
        dynamodb_resources = self._construct_dynamodb_table()

        return [dynamodb_resources]

    def _construct_dynamodb_table(self):
        """Build the DynamoDB table resource from this macro's properties."""
        dynamodb_table = DynamoDBTable(self.logical_id, depends_on=self.depends_on,
                                       attributes=self.resource_attributes)

        if self.PrimaryKey:
            primary_key = {
                'AttributeName': self.PrimaryKey['Name'],
                'AttributeType': self._convert_attribute_type(self.PrimaryKey['Type'])
            }
        else:
            # Default primary key when the customer does not provide one.
            primary_key = {'AttributeName': 'id', 'AttributeType': 'S'}

        dynamodb_table.AttributeDefinitions = [primary_key]
        dynamodb_table.KeySchema = [{
            'AttributeName': primary_key['AttributeName'],
            'KeyType': 'HASH'
        }]

        if self.ProvisionedThroughput:
            dynamodb_table.ProvisionedThroughput = self.ProvisionedThroughput
        else:
            # Without explicit throughput, fall back to on-demand billing.
            dynamodb_table.BillingMode = 'PAY_PER_REQUEST'

        if self.SSESpecification:
            dynamodb_table.SSESpecification = self.SSESpecification

        if self.TableName:
            dynamodb_table.TableName = self.TableName

        # Idiomatic truthiness check (was `if bool(self.Tags):`).
        if self.Tags:
            dynamodb_table.Tags = get_tag_list(self.Tags)

        return dynamodb_table

    def _convert_attribute_type(self, attribute_type):
        """Translate a SAM attribute type name ('String'/'Number'/'Binary') to
        the DynamoDB code, raising InvalidResourceException for unknown types."""
        if attribute_type in self.attribute_type_conversions:
            return self.attribute_type_conversions[attribute_type]
        raise InvalidResourceException(self.logical_id,
                                       'Invalid \'Type\' "{actual}".'.format(actual=attribute_type))
class LambdaFunction(Resource):
    """CloudFormation model for an ``AWS::Lambda::Function`` resource."""

    resource_type = 'AWS::Lambda::Function'
    property_types = {
        'Code': PropertyType(True, is_type(dict)),
        'DeadLetterConfig': PropertyType(False, is_type(dict)),
        'Description': PropertyType(False, is_str()),
        'FunctionName': PropertyType(False, is_str()),
        'Handler': PropertyType(True, is_str()),
        'MemorySize': PropertyType(False, is_type(int)),
        'Role': PropertyType(False, is_str()),
        'Runtime': PropertyType(False, is_str()),
        'Timeout': PropertyType(False, is_type(int)),
        'VpcConfig': PropertyType(False, is_type(dict)),
        'Environment': PropertyType(False, is_type(dict)),
        'Tags': PropertyType(False, list_of(is_type(dict))),
        'TracingConfig': PropertyType(False, is_type(dict)),
        'KmsKeyArn': PropertyType(False, one_of(is_type(dict), is_str())),
        'Layers': PropertyType(False, list_of(one_of(is_str(), is_type(dict)))),
        'ReservedConcurrentExecutions': PropertyType(False, any_type()),
    }

    # Attributes other resources may reference at runtime.
    runtime_attrs = {
        "name": lambda self: ref(self.logical_id),
        "arn": lambda self: fnGetAtt(self.logical_id, "Arn"),
    }
class DynamoDBTable(Resource):
    """CloudFormation model for an ``AWS::DynamoDB::Table`` resource."""

    resource_type = 'AWS::DynamoDB::Table'
    property_types = {
        'AttributeDefinitions': PropertyType(True, list_of(is_type(dict))),
        'GlobalSecondaryIndexes': PropertyType(False, list_of(is_type(dict))),
        'KeySchema': PropertyType(False, list_of(is_type(dict))),
        'LocalSecondaryIndexes': PropertyType(False, list_of(is_type(dict))),
        'ProvisionedThroughput': PropertyType(True, dict_of(is_str(), one_of(is_type(int), is_type(dict)))),
        'StreamSpecification': PropertyType(False, is_type(dict)),
        'TableName': PropertyType(False, one_of(is_str(), is_type(dict))),
        'Tags': PropertyType(False, list_of(is_type(dict))),
        'SSESpecification': PropertyType(False, is_type(dict)),
    }

    # Attributes other resources may reference at runtime.
    runtime_attrs = {
        "name": lambda self: ref(self.logical_id),
        "arn": lambda self: fnGetAtt(self.logical_id, "Arn"),
        "stream_arn": lambda self: fnGetAtt(self.logical_id, "StreamArn"),
    }
class LambdaFunction(Resource):
    """CloudFormation model for an ``AWS::Lambda::Function`` resource."""

    resource_type = "AWS::Lambda::Function"
    property_types = {
        "Code": PropertyType(True, is_type(dict)),
        "DeadLetterConfig": PropertyType(False, is_type(dict)),
        "Description": PropertyType(False, is_str()),
        "FunctionName": PropertyType(False, is_str()),
        "Handler": PropertyType(True, is_str()),
        "MemorySize": PropertyType(False, is_type(int)),
        "Role": PropertyType(False, is_str()),
        "Runtime": PropertyType(False, is_str()),
        "Timeout": PropertyType(False, is_type(int)),
        "VpcConfig": PropertyType(False, is_type(dict)),
        "Environment": PropertyType(False, is_type(dict)),
        "Tags": PropertyType(False, list_of(is_type(dict))),
        "TracingConfig": PropertyType(False, is_type(dict)),
        "KmsKeyArn": PropertyType(False, one_of(is_type(dict), is_str())),
        "Layers": PropertyType(False, list_of(one_of(is_str(), is_type(dict)))),
        "ReservedConcurrentExecutions": PropertyType(False, any_type()),
    }

    # Attributes other resources may reference at runtime.
    runtime_attrs = {
        "name": lambda self: ref(self.logical_id),
        "arn": lambda self: fnGetAtt(self.logical_id, "Arn"),
    }
class SamApi(SamResourceMacro):
    """SAM rest API macro."""

    resource_type = 'AWS::Serverless::Api'
    property_types = {
        # Internal property set only by Implicit API plugin. If set to True, the API Event Source code will inject
        # Lambda Integration URI to the Swagger. To preserve backwards compatibility, this must be set only for
        # Implicit APIs. For Explicit APIs, customer is expected to set integration URI themselves.
        # In the future, we might rename and expose this property to customers so they can have SAM manage
        # Explicit APIs Swagger.
        '__MANAGE_SWAGGER': PropertyType(False, is_type(bool)),
        'Name': PropertyType(False, one_of(is_str(), is_type(dict))),
        'StageName': PropertyType(True, one_of(is_str(), is_type(dict))),
        'DefinitionBody': PropertyType(False, is_type(dict)),
        'DefinitionUri': PropertyType(False, one_of(is_str(), is_type(dict))),
        'CacheClusterEnabled': PropertyType(False, is_type(bool)),
        'CacheClusterSize': PropertyType(False, is_str()),
        'Variables': PropertyType(False, is_type(dict)),
        'EndpointConfiguration': PropertyType(False, is_str()),
        'MethodSettings': PropertyType(False, is_type(list)),
        'BinaryMediaTypes': PropertyType(False, is_type(list)),
        'Cors': PropertyType(False, one_of(is_str(), is_type(dict)))
    }

    referable_properties = {
        "Stage": ApiGatewayStage.resource_type,
        "Deployment": ApiGatewayDeployment.resource_type,
    }

    def to_cloudformation(self, **kwargs):
        """Returns the API Gateway RestApi, Deployment, and Stage to which this SAM Api corresponds.

        :param dict kwargs: already-converted resources that may need to be modified when converting this \
        macro to pure CloudFormation
        :returns: a list of vanilla CloudFormation Resources, to which this Function expands
        :rtype: list
        """
        generator = ApiGenerator(self.logical_id,
                                 self.CacheClusterEnabled,
                                 self.CacheClusterSize,
                                 self.Variables,
                                 self.depends_on,
                                 self.DefinitionBody,
                                 self.DefinitionUri,
                                 self.Name,
                                 self.StageName,
                                 endpoint_configuration=self.EndpointConfiguration,
                                 method_settings=self.MethodSettings,
                                 binary_media=self.BinaryMediaTypes,
                                 cors=self.Cors)

        rest_api, deployment, stage = generator.to_cloudformation()
        # Build the result list directly instead of append/extend on an empty list.
        return [rest_api, deployment, stage]
class SamHttpApi(SamResourceMacro):
    """SAM rest API macro."""

    resource_type = 'AWS::Serverless::HttpApi'
    property_types = {
        # Internal property set only by Implicit HTTP API plugin. If set to True, the API Event Source code will
        # inject Lambda Integration URI to the OpenAPI. To preserve backwards compatibility, this must be set only
        # for Implicit APIs. For Explicit APIs, this is managed by the DefaultDefinitionBody Plugin.
        # In the future, we might rename and expose this property to customers so they can have SAM manage
        # Explicit APIs Swagger.
        '__MANAGE_SWAGGER': PropertyType(False, is_type(bool)),
        'StageName': PropertyType(False, one_of(is_str(), is_type(dict))),
        'Tags': PropertyType(False, is_type(dict)),
        'DefinitionBody': PropertyType(False, is_type(dict)),
        'DefinitionUri': PropertyType(False, one_of(is_str(), is_type(dict))),
        'StageVariables': PropertyType(False, is_type(dict)),
        'Cors': PropertyType(False, one_of(is_str(), is_type(dict))),
        'AccessLogSettings': PropertyType(False, is_type(dict)),
        'Auth': PropertyType(False, is_type(dict))
    }

    referable_properties = {
        "Stage": ApiGatewayV2Stage.resource_type,
    }

    def to_cloudformation(self, **kwargs):
        """Returns the API Gateway RestApi, Deployment, and Stage to which this SAM Api corresponds.

        :param dict kwargs: already-converted resources that may need to be modified when converting this \
        macro to pure CloudFormation
        :returns: a list of vanilla CloudFormation Resources, to which this Function expands
        :rtype: list
        """
        generator = HttpApiGenerator(
            self.logical_id,
            self.StageVariables,
            self.depends_on,
            self.DefinitionBody,
            self.DefinitionUri,
            self.StageName,
            tags=self.Tags,
            auth=self.Auth,
            access_log_settings=self.AccessLogSettings,
            resource_attributes=self.resource_attributes,
            passthrough_resource_attributes=self.get_passthrough_resource_attributes())

        http_api, stage = generator.to_cloudformation()

        resources = [http_api]
        # Stage is now optional. Only add it if one is created.
        if stage:
            resources.append(stage)
        return resources
class LambdaAlias(Resource):
    """CloudFormation model for an ``AWS::Lambda::Alias`` resource."""

    resource_type = 'AWS::Lambda::Alias'
    property_types = {
        'Description': PropertyType(False, is_str()),
        'Name': PropertyType(False, is_str()),
        'FunctionName': PropertyType(True, one_of(is_str(), is_type(dict))),
        'FunctionVersion': PropertyType(True, one_of(is_str(), is_type(dict))),
    }

    # The alias ARN is what Ref returns for this resource type.
    runtime_attrs = {"arn": lambda self: ref(self.logical_id)}
class LambdaAlias(Resource):
    """CloudFormation model for an ``AWS::Lambda::Alias`` resource."""

    resource_type = "AWS::Lambda::Alias"
    property_types = {
        "Description": PropertyType(False, is_str()),
        "Name": PropertyType(False, is_str()),
        "FunctionName": PropertyType(True, one_of(is_str(), is_type(dict))),
        "FunctionVersion": PropertyType(True, one_of(is_str(), is_type(dict))),
        "ProvisionedConcurrencyConfig": PropertyType(False, is_type(dict)),
    }

    # The alias ARN is what Ref returns for this resource type.
    runtime_attrs = {"arn": lambda self: ref(self.logical_id)}
class SamApplication(SamResourceMacro):
    """SAM application macro.

    Expands ``AWS::Serverless::Application`` into a nested
    ``AWS::CloudFormation::Stack`` resource.
    """
    APPLICATION_ID_KEY = 'ApplicationId'
    SEMANTIC_VERSION_KEY = 'SemanticVersion'

    resource_type = 'AWS::Serverless::Application'

    # The plugin will always insert the TemplateUrl parameter
    property_types = {
        'Location': PropertyType(True, one_of(is_str(), is_type(dict))),
        'TemplateUrl': PropertyType(False, is_str()),
        'Parameters': PropertyType(False, is_type(dict)),
        'NotificationARNs': PropertyType(False, list_of(one_of(is_str(), is_type(dict)))),
        'Tags': PropertyType(False, is_type(dict)),
        'TimeoutInMinutes': PropertyType(False, is_type(int))
    }

    def to_cloudformation(self, **kwargs):
        """Returns the stack with the proper parameters for this application"""
        nested_stack = self._construct_nested_stack()
        return [nested_stack]

    def _construct_nested_stack(self):
        """Constructs a AWS::CloudFormation::Stack resource"""
        nested_stack = NestedStack(self.logical_id, depends_on=self.depends_on,
                                   attributes=self.get_passthrough_resource_attributes())
        nested_stack.Parameters = self.Parameters
        nested_stack.NotificationARNs = self.NotificationARNs
        application_tags = self._get_application_tags()
        nested_stack.Tags = self._construct_tag_list(self.Tags, application_tags)
        nested_stack.TimeoutInMinutes = self.TimeoutInMinutes
        # TemplateURL is required on the nested stack; the plugin normally fills
        # TemplateUrl in, so fall back to an empty string only as a placeholder.
        nested_stack.TemplateURL = self.TemplateUrl if self.TemplateUrl else ""

        return nested_stack

    def _get_application_tags(self):
        """Adds tags to the stack if this resource is using the serverless app repo"""
        application_tags = {}
        if isinstance(self.Location, dict):
            # Idiom fix: `.get(key) is not None` instead of the redundant
            # `key in dict.keys() and dict[key] is not None`.
            if self.Location.get(self.APPLICATION_ID_KEY) is not None:
                application_tags[self._SAR_APP_KEY] = self.Location[self.APPLICATION_ID_KEY]
            if self.Location.get(self.SEMANTIC_VERSION_KEY) is not None:
                application_tags[self._SAR_SEMVER_KEY] = self.Location[self.SEMANTIC_VERSION_KEY]
        return application_tags
class CodeDeployApplication(Resource):
    """CloudFormation model for an ``AWS::CodeDeploy::Application`` resource."""

    resource_type = "AWS::CodeDeploy::Application"
    property_types = {
        "ComputePlatform": PropertyType(False, one_of(is_str(), is_type(dict))),
    }

    runtime_attrs = {"name": lambda self: ref(self.logical_id)}
def test_one_of_validator(value, validators, should_pass):
    """Check that one_of(*validators) accepts or rejects `value` per `should_pass`."""
    validate = one_of(*validators)

    if should_pass:
        assert validate(value), \
            "one_of validator failed for validators {}, value {}".format(validators, value)
        return

    failure_msg = "one_of validator unexpectedly succeeded for validators {}, value {}".format(
        validators, value)
    assert not validate(value, should_raise=False), failure_msg
    # With should_raise left at its default, a failing value must raise.
    with pytest.raises(TypeError):
        validate(value)
class CodeDeployDeploymentGroup(Resource):
    """CloudFormation model for an ``AWS::CodeDeploy::DeploymentGroup`` resource."""

    resource_type = "AWS::CodeDeploy::DeploymentGroup"
    property_types = {
        "AlarmConfiguration": PropertyType(False, is_type(dict)),
        "ApplicationName": PropertyType(True, one_of(is_str(), is_type(dict))),
        "AutoRollbackConfiguration": PropertyType(False, is_type(dict)),
        "DeploymentConfigName": PropertyType(False, one_of(is_str(), is_type(dict))),
        "DeploymentStyle": PropertyType(False, is_type(dict)),
        "ServiceRoleArn": PropertyType(True, one_of(is_str(), is_type(dict))),
        "TriggerConfigurations": PropertyType(False, is_type(list)),
    }

    runtime_attrs = {"name": lambda self: ref(self.logical_id)}
class CodeDeployDeploymentGroup(Resource):
    """CloudFormation model for an ``AWS::CodeDeploy::DeploymentGroup`` resource."""

    resource_type = 'AWS::CodeDeploy::DeploymentGroup'
    property_types = {
        'AlarmConfiguration': PropertyType(False, is_type(dict)),
        'ApplicationName': PropertyType(True, one_of(is_str(), is_type(dict))),
        'AutoRollbackConfiguration': PropertyType(False, is_type(dict)),
        'DeploymentConfigName': PropertyType(False, one_of(is_str(), is_type(dict))),
        'DeploymentStyle': PropertyType(False, is_type(dict)),
        'ServiceRoleArn': PropertyType(True, one_of(is_str(), is_type(dict))),
    }

    runtime_attrs = {"name": lambda self: ref(self.logical_id)}
class DynamoDBTable(Resource):
    """CloudFormation model for an ``AWS::DynamoDB::Table`` resource."""

    resource_type = "AWS::DynamoDB::Table"
    property_types = {
        "AttributeDefinitions": PropertyType(True, list_of(is_type(dict))),
        "GlobalSecondaryIndexes": PropertyType(False, list_of(is_type(dict))),
        "KeySchema": PropertyType(False, list_of(is_type(dict))),
        "LocalSecondaryIndexes": PropertyType(False, list_of(is_type(dict))),
        "ProvisionedThroughput": PropertyType(False, dict_of(is_str(), one_of(is_type(int), is_type(dict)))),
        "StreamSpecification": PropertyType(False, is_type(dict)),
        "TableName": PropertyType(False, one_of(is_str(), is_type(dict))),
        "Tags": PropertyType(False, list_of(is_type(dict))),
        "SSESpecification": PropertyType(False, is_type(dict)),
        "BillingMode": PropertyType(False, is_str()),
    }

    # Attributes other resources may reference at runtime.
    runtime_attrs = {
        "name": lambda self: ref(self.logical_id),
        "arn": lambda self: fnGetAtt(self.logical_id, "Arn"),
        "stream_arn": lambda self: fnGetAtt(self.logical_id, "StreamArn"),
    }
class LambdaVersion(Resource):
    """CloudFormation model for an ``AWS::Lambda::Version`` resource."""

    resource_type = 'AWS::Lambda::Version'
    property_types = {
        'CodeSha256': PropertyType(False, is_str()),
        'Description': PropertyType(False, is_str()),
        'FunctionName': PropertyType(True, one_of(is_str(), is_type(dict))),
    }

    # Ref yields the version ARN; GetAtt Version yields the version number.
    runtime_attrs = {
        "arn": lambda self: ref(self.logical_id),
        "version": lambda self: fnGetAtt(self.logical_id, "Version"),
    }
class LambdaLayerVersion(Resource):
    """Lambda layer version resource"""

    resource_type = "AWS::Lambda::LayerVersion"
    property_types = {
        "Content": PropertyType(True, is_type(dict)),
        "Description": PropertyType(False, is_str()),
        "LayerName": PropertyType(False, is_str()),
        "CompatibleRuntimes": PropertyType(False, list_of(one_of(is_str(), is_type(dict)))),
        "LicenseInfo": PropertyType(False, is_str()),
    }

    # Attributes other resources may reference at runtime.
    runtime_attrs = {
        "name": lambda self: ref(self.logical_id),
        "arn": lambda self: fnGetAtt(self.logical_id, "Arn"),
    }
class ApiGatewayV2Stage(Resource):
    """CloudFormation model for an ``AWS::ApiGatewayV2::Stage`` resource."""

    resource_type = "AWS::ApiGatewayV2::Stage"
    property_types = {
        "AccessLogSettings": PropertyType(False, is_type(dict)),
        "DefaultRouteSettings": PropertyType(False, is_type(dict)),
        "ClientCertificateId": PropertyType(False, is_str()),
        "Description": PropertyType(False, is_str()),
        "ApiId": PropertyType(True, is_str()),
        "StageName": PropertyType(False, one_of(is_str(), is_type(dict))),
        "Tags": PropertyType(False, list_of(is_type(dict))),
        "StageVariables": PropertyType(False, is_type(dict)),
        "AutoDeploy": PropertyType(False, is_type(bool)),
    }

    runtime_attrs = {"stage_name": lambda self: ref(self.logical_id)}
class SNS(PushEventSource):
    """SNS topic event source for SAM Functions."""
    resource_type = 'SNS'
    principal = 'sns.amazonaws.com'
    property_types = {
        'Topic': PropertyType(True, is_str()),
        'Region': PropertyType(False, is_str()),
        'FilterPolicy': PropertyType(
            False,
            dict_of(is_str(), list_of(one_of(is_str(), is_type(dict)))))
    }

    def to_cloudformation(self, **kwargs):
        """Returns the Lambda Permission resource allowing SNS to invoke the function this event source triggers.

        :param dict kwargs: no existing resources need to be modified
        :returns: a list of vanilla CloudFormation Resources, to which this SNS event expands
        :rtype: list
        """
        function = kwargs.get('function')

        if not function:
            raise TypeError("Missing required keyword argument: function")

        return [
            self._construct_permission(function, source_arn=self.Topic),
            self._inject_subscription(function, self.Topic, self.Region, self.FilterPolicy)
        ]

    def _inject_subscription(self, function, topic, region, filter_policy):
        """Build the SNS::Subscription wiring the topic to the function.

        The parameter is snake_case per PEP 8 (was ``filterPolicy``); the
        method is private and only called positionally from this class.
        """
        subscription = SNSSubscription(self.logical_id)
        subscription.Protocol = 'lambda'
        subscription.Endpoint = function.get_runtime_attr("arn")
        subscription.TopicArn = topic

        if region is not None:
            subscription.Region = region

        # Propagate the function's Condition so the subscription is only
        # created when the function itself is created.
        if CONDITION in function.resource_attributes:
            subscription.set_resource_attribute(
                CONDITION, function.resource_attributes[CONDITION])

        if filter_policy is not None:
            subscription.FilterPolicy = filter_policy

        return subscription
class NestedStack(Resource):
    """CloudFormation model for an ``AWS::CloudFormation::Stack`` resource."""

    resource_type = 'AWS::CloudFormation::Stack'
    # TODO: support passthrough parameters for stacks (Conditions, etc)
    property_types = {
        'TemplateURL': PropertyType(True, is_str()),
        'Parameters': PropertyType(False, is_type(dict)),
        'NotificationARNs': PropertyType(False, list_of(one_of(is_str(), is_type(dict)))),
        'Tags': PropertyType(False, list_of(is_type(dict))),
        'TimeoutInMinutes': PropertyType(False, is_type(int)),
    }

    runtime_attrs = {"stack_id": lambda self: ref(self.logical_id)}
class ApiGatewayV2Stage(Resource):
    """CloudFormation model for an ``AWS::ApiGatewayV2::Stage`` resource."""

    resource_type = 'AWS::ApiGatewayV2::Stage'
    property_types = {
        'AccessLogSettings': PropertyType(False, is_type(dict)),
        'DefaultRouteSettings': PropertyType(False, is_type(dict)),
        'ClientCertificateId': PropertyType(False, is_str()),
        'Description': PropertyType(False, is_str()),
        'ApiId': PropertyType(True, is_str()),
        'StageName': PropertyType(False, one_of(is_str(), is_type(dict))),
        'Tags': PropertyType(False, list_of(is_type(dict))),
        'StageVariables': PropertyType(False, is_type(dict)),
        'AutoDeploy': PropertyType(False, is_type(bool)),
    }

    runtime_attrs = {"stage_name": lambda self: ref(self.logical_id)}
class ApiGatewayStage(Resource):
    """CloudFormation model for an ``AWS::ApiGateway::Stage`` resource."""

    resource_type = 'AWS::ApiGateway::Stage'
    property_types = {
        'CacheClusterEnabled': PropertyType(False, is_type(bool)),
        'CacheClusterSize': PropertyType(False, is_str()),
        'ClientCertificateId': PropertyType(False, is_str()),
        'DeploymentId': PropertyType(True, is_str()),
        'Description': PropertyType(False, is_str()),
        'RestApiId': PropertyType(True, is_str()),
        'StageName': PropertyType(True, one_of(is_str(), is_type(dict))),
        'Variables': PropertyType(False, is_type(dict)),
        # Consistency fix: quoted like the other keys (was double-quoted).
        'MethodSettings': PropertyType(False, is_type(list))
    }

    runtime_attrs = {
        "stage_name": lambda self: ref(self.logical_id),
    }

    def update_deployment_ref(self, deployment_logical_id):
        """Point this stage at a (re)generated deployment resource."""
        self.DeploymentId = ref(deployment_logical_id)
class ApiGatewayStage(Resource):
    """CloudFormation model for an ``AWS::ApiGateway::Stage`` resource."""

    resource_type = "AWS::ApiGateway::Stage"
    property_types = {
        "AccessLogSetting": PropertyType(False, is_type(dict)),
        "CacheClusterEnabled": PropertyType(False, is_type(bool)),
        "CacheClusterSize": PropertyType(False, is_str()),
        "CanarySetting": PropertyType(False, is_type(dict)),
        "ClientCertificateId": PropertyType(False, is_str()),
        "DeploymentId": PropertyType(True, is_str()),
        "Description": PropertyType(False, is_str()),
        "RestApiId": PropertyType(True, is_str()),
        "StageName": PropertyType(True, one_of(is_str(), is_type(dict))),
        "Tags": PropertyType(False, list_of(is_type(dict))),
        "TracingEnabled": PropertyType(False, is_type(bool)),
        "Variables": PropertyType(False, is_type(dict)),
        "MethodSettings": PropertyType(False, is_type(list)),
    }

    runtime_attrs = {"stage_name": lambda self: ref(self.logical_id)}

    def update_deployment_ref(self, deployment_logical_id):
        """Point this stage at a (re)generated deployment resource."""
        self.DeploymentId = ref(deployment_logical_id)
class Cognito(PushEventSource):
    """Cognito UserPool event source for SAM Functions."""

    resource_type = "Cognito"
    principal = "cognito-idp.amazonaws.com"
    property_types = {
        "UserPool": PropertyType(True, is_str()),
        "Trigger": PropertyType(True, one_of(is_str(), list_of(is_str()))),
    }

    def resources_to_link(self, resources):
        """Resolve the referenced UserPool resource from this template."""
        if isinstance(self.UserPool, dict) and "Ref" in self.UserPool:
            userpool_id = self.UserPool["Ref"]
            if userpool_id in resources:
                return {"userpool": resources[userpool_id], "userpool_id": userpool_id}
        raise InvalidEventException(
            self.relative_id, "Cognito events must reference a Cognito UserPool in the same template."
        )

    def to_cloudformation(self, **kwargs):
        """Return the Lambda Permission plus the user pool updated with LambdaConfig triggers."""
        function = kwargs.get("function")
        if not function:
            raise TypeError("Missing required keyword argument: function")
        if kwargs.get("userpool") is None:
            raise TypeError("Missing required keyword argument: userpool")
        if kwargs.get("userpool_id") is None:
            raise TypeError("Missing required keyword argument: userpool_id")

        userpool = kwargs["userpool"]
        userpool_id = kwargs["userpool_id"]

        source_arn = fnGetAtt(userpool_id, "Arn")
        permission = self._construct_permission(
            function, source_arn=source_arn, prefix=function.logical_id + "Cognito"
        )

        self._inject_lambda_config(function, userpool)
        # Re-emit the mutated pool so the output template carries LambdaConfig.
        return [permission, CognitoUserPool.from_dict(userpool_id, userpool)]

    def _inject_lambda_config(self, function, userpool):
        """Bind the function ARN into the pool's LambdaConfig trigger slots."""
        triggers = [self.Trigger] if isinstance(self.Trigger, string_types) else self.Trigger

        # TODO can these be conditional?

        # Replace missing-or-None containers in place, preserving existing ones.
        if userpool.get("Properties") is None:
            userpool["Properties"] = {}
        properties = userpool["Properties"]

        if properties.get("LambdaConfig") is None:
            properties["LambdaConfig"] = {}
        lambda_config = properties["LambdaConfig"]

        for trigger in triggers:
            if trigger in lambda_config:
                raise InvalidEventException(
                    self.relative_id,
                    'Cognito trigger "{trigger}" defined multiple times.'.format(trigger=self.Trigger)
                )
            lambda_config[trigger] = function.get_runtime_attr("arn")

        return userpool
class Cognito(PushEventSource):
    """Cognito UserPool event source for SAM Functions.

    Grants the pool permission to invoke the function and wires the function
    into the pool's LambdaConfig trigger slots.
    """
    resource_type = 'Cognito'
    principal = 'cognito-idp.amazonaws.com'
    property_types = {
        'UserPool': PropertyType(True, is_str()),
        'Trigger': PropertyType(True, one_of(is_str(), list_of(is_str())))
    }

    def resources_to_link(self, resources):
        # UserPool must be a {"Ref": ...} pointing at a pool defined in this
        # template; resolve it so to_cloudformation() can mutate that resource.
        if isinstance(self.UserPool, dict) and 'Ref' in self.UserPool:
            userpool_id = self.UserPool['Ref']
            if userpool_id in resources:
                return {
                    'userpool': resources[userpool_id],
                    'userpool_id': userpool_id
                }
        raise InvalidEventException(
            self.relative_id,
            "Cognito events must reference a Cognito UserPool in the same template.")

    def to_cloudformation(self, **kwargs):
        """Returns the Lambda Permission and the updated UserPool resource.

        :param dict kwargs: must contain 'function', plus 'userpool' and
            'userpool_id' as produced by resources_to_link
        :returns: a list of vanilla CloudFormation Resources
        :rtype: list
        """
        function = kwargs.get('function')

        if not function:
            raise TypeError("Missing required keyword argument: function")

        if 'userpool' not in kwargs or kwargs['userpool'] is None:
            raise TypeError("Missing required keyword argument: userpool")

        if 'userpool_id' not in kwargs or kwargs['userpool_id'] is None:
            raise TypeError("Missing required keyword argument: userpool_id")

        userpool = kwargs['userpool']
        userpool_id = kwargs['userpool_id']

        resources = []

        # NOTE(review): permission is granted via EventSourceToken (set to the
        # UserPool reference) rather than SourceArn — confirm this is intended.
        resources.append(
            self._construct_permission(
                function,
                event_source_token=self.UserPool,
                prefix=function.logical_id + "Cognito"))

        self._inject_lambda_config(function, userpool)
        # Re-emit the (mutated) pool so the template picks up LambdaConfig.
        resources.append(CognitoUserPool.from_dict(userpool_id, userpool))
        return resources

    def _inject_lambda_config(self, function, userpool):
        # Normalize Trigger to a list of trigger names.
        event_triggers = self.Trigger

        if isinstance(self.Trigger, string_types):
            event_triggers = [self.Trigger]

        # TODO can these be conditional?

        properties = userpool.get('Properties', None)
        if properties is None:
            properties = {}
            userpool['Properties'] = properties

        lambda_config = properties.get('LambdaConfig', None)
        if lambda_config is None:
            lambda_config = {}
            properties['LambdaConfig'] = lambda_config

        for event_trigger in event_triggers:
            if event_trigger not in lambda_config:
                lambda_config[event_trigger] = function.get_runtime_attr("arn")
            else:
                # Each trigger slot may be bound to at most one function.
                raise InvalidEventException(
                    self.relative_id,
                    'Cognito trigger "{trigger}" defined multiple times.'.format(
                        trigger=self.Trigger))
        return userpool
class ApiGatewayAccount(Resource):
    """CloudFormation model for an ``AWS::ApiGateway::Account`` resource."""

    resource_type = 'AWS::ApiGateway::Account'
    property_types = {
        'CloudWatchRoleArn': PropertyType(False, one_of(is_str(), is_type(dict))),
    }
class SNS(PushEventSource):
    """SNS topic event source for SAM Functions."""

    resource_type = 'SNS'
    principal = 'sns.amazonaws.com'
    property_types = {
        'Topic': PropertyType(True, is_str()),
        'Region': PropertyType(False, is_str()),
        'FilterPolicy': PropertyType(False, dict_of(is_str(), list_of(one_of(is_str(), is_type(dict))))),
        'SqsSubscription': PropertyType(False, is_type(bool))
    }

    def to_cloudformation(self, **kwargs):
        """Returns the Lambda Permission resource allowing SNS to invoke the function this event source triggers.

        :param dict kwargs: no existing resources need to be modified
        :returns: a list of vanilla CloudFormation Resources, to which this SNS event expands
        :rtype: list
        """
        function = kwargs.get('function')
        role = kwargs.get('role')

        if not function:
            raise TypeError("Missing required keyword argument: function")

        if not self.SqsSubscription:
            # Direct SNS -> Lambda wiring.
            subscription = self._inject_subscription(
                'lambda', function.get_runtime_attr("arn"),
                self.Topic, self.Region, self.FilterPolicy, function.resource_attributes
            )
            return [self._construct_permission(function, source_arn=self.Topic), subscription]

        # Buffered SNS -> SQS -> Lambda wiring.
        queue = self._inject_sqs_queue()
        queue_policy = self._inject_sqs_queue_policy(self.Topic, queue)
        subscription = self._inject_subscription(
            'sqs', queue.get_runtime_attr('arn'),
            self.Topic, self.Region, self.FilterPolicy, function.resource_attributes
        )

        resources = list(self._inject_sqs_event_source_mapping(function, role, queue.get_runtime_attr('arn')))
        resources.extend([queue, queue_policy, subscription])
        return resources

    def _inject_subscription(self, protocol, endpoint, topic, region, filter_policy, resource_attributes):
        """Create the SNS::Subscription for the given protocol/endpoint pair."""
        subscription = SNSSubscription(self.logical_id)
        subscription.Protocol = protocol
        subscription.Endpoint = endpoint
        subscription.TopicArn = topic

        if region is not None:
            subscription.Region = region

        # Propagate the caller's Condition so the subscription is only created
        # when its target resource is.
        if CONDITION in resource_attributes:
            subscription.set_resource_attribute(CONDITION, resource_attributes[CONDITION])

        if filter_policy is not None:
            subscription.FilterPolicy = filter_policy

        return subscription

    def _inject_sqs_queue(self):
        """Create the buffer queue placed between the topic and the function."""
        return SQSQueue(self.logical_id + 'Queue')

    def _inject_sqs_event_source_mapping(self, function, role, queue_arn):
        """Create the SQS event source mapping that drains the queue into the function."""
        event_source = SQS(self.logical_id + 'EventSourceMapping')
        event_source.Queue = queue_arn
        event_source.BatchSize = 10
        event_source.Enabled = True
        return event_source.to_cloudformation(function=function, role=role)

    def _inject_sqs_queue_policy(self, topic_arn, queue):
        """Allow the topic to send messages into the buffer queue."""
        policy = SQSQueuePolicy(self.logical_id + 'QueuePolicy')
        policy.PolicyDocument = SQSQueuePolicies.sns_topic_send_message_role_policy(
            topic_arn, queue.get_runtime_attr('arn')
        )
        policy.Queues = [queue.get_runtime_attr('queue_url')]
        return policy
class S3(PushEventSource):
    """S3 bucket event source for SAM Functions."""
    resource_type = 'S3'
    principal = 's3.amazonaws.com'
    property_types = {
        'Bucket': PropertyType(True, is_str()),
        'Events': PropertyType(True, one_of(is_str(), list_of(is_str()))),
        'Filter': PropertyType(False, dict_of(is_str(), is_str()))
    }

    def resources_to_link(self, resources):
        # Bucket must be a {"Ref": ...} pointing at a bucket defined in this
        # template; resolve it so to_cloudformation() can mutate that resource.
        if isinstance(self.Bucket, dict) and 'Ref' in self.Bucket:
            bucket_id = self.Bucket['Ref']
            if bucket_id in resources:
                return {
                    'bucket': resources[bucket_id],
                    'bucket_id': bucket_id
                }
        raise InvalidEventException(self.relative_id,
                                    "S3 events must reference an S3 bucket in the same template.")

    def to_cloudformation(self, **kwargs):
        """Returns the Lambda Permission resource allowing S3 to invoke the function this event source triggers.

        :param dict kwargs: S3 bucket resource
        :returns: a list of vanilla CloudFormation Resources, to which this S3 event expands
        :rtype: list
        """
        function = kwargs.get('function')

        if not function:
            raise TypeError("Missing required keyword argument: function")

        if 'bucket' not in kwargs or kwargs['bucket'] is None:
            raise TypeError("Missing required keyword argument: bucket")

        if 'bucket_id' not in kwargs or kwargs['bucket_id'] is None:
            raise TypeError("Missing required keyword argument: bucket_id")

        bucket = kwargs['bucket']
        bucket_id = kwargs['bucket_id']

        resources = []

        source_account = ref('AWS::AccountId')
        permission = self._construct_permission(function, source_account=source_account)
        if CONDITION in permission.resource_attributes:
            self._depend_on_lambda_permissions_using_tag(bucket, permission)
        else:
            self._depend_on_lambda_permissions(bucket, permission)
        resources.append(permission)

        # NOTE: `bucket` here is a dictionary representing the S3 Bucket resource in your SAM template. If there are
        # multiple S3 Events attached to the same bucket, we will update the Bucket resource with notification
        # configuration for each event. This is the reason why we continue to use existing bucket dict and append onto
        # it.
        #
        # NOTE: There is some fragile logic here where we will append multiple resources to output
        # SAM template but de-dupe them when merging into output CFN template. This is scary because the order of
        # merging is literally "last one wins", which works fine because we linearly loop through the template once.
        # The de-dupe happens inside `samtranslator.translator.Translator.translate` method when merging results of
        # to_cloudformation() to output template.
        self._inject_notification_configuration(function, bucket)
        resources.append(S3Bucket.from_dict(bucket_id, bucket))

        return resources

    def _depend_on_lambda_permissions(self, bucket, permission):
        """
        Make the S3 bucket depends on Lambda Permissions resource because when S3 adds a Notification Configuration,
        it will check whether it has permissions to access Lambda. This will fail if the Lambda::Permissions is not
        already applied for this bucket to invoke the Lambda.

        :param dict bucket: Dictionary representing the bucket in SAM template. This is a raw dictionary and not a
            "resource" object
        :param model.lambda_.lambda_permission permission: Lambda Permission resource that needs to be created before
            the bucket.
        :return: Modified Bucket dictionary
        """
        depends_on = bucket.get("DependsOn", [])

        # DependsOn can be either a list of strings or a scalar string
        if isinstance(depends_on, string_types):
            depends_on = [depends_on]

        # Use a set to avoid adding the same dependency twice.
        depends_on_set = set(depends_on)
        depends_on_set.add(permission.logical_id)
        bucket["DependsOn"] = list(depends_on_set)

        return bucket

    def _depend_on_lambda_permissions_using_tag(self, bucket, permission):
        """
        Since conditional DependsOn is not supported this undocumented way of
        implicitely  making dependency through tags is used.

        See https://stackoverflow.com/questions/34607476/cloudformation-apply-condition-on-dependson

        It is done by using Ref wrapped in a conditional Fn::If. Using Ref implies a
        dependency, so CloudFormation will automatically wait once it reaches that function, the same
        as if you were using a DependsOn.
        """
        properties = bucket.get('Properties', None)
        if properties is None:
            properties = {}
            bucket['Properties'] = properties
        tags = properties.get('Tags', None)
        if tags is None:
            tags = []
            properties['Tags'] = tags
        # Synthetic tag whose value only resolves (via Ref) when the permission
        # exists; 'no dependency' is the no-op branch when the condition is false.
        dep_tag = {
            'sam:ConditionalDependsOn:' + permission.logical_id: {
                'Fn::If': [
                    permission.resource_attributes[CONDITION],
                    ref(permission.logical_id),
                    'no dependency'
                ]
            }
        }
        properties['Tags'] = tags + get_tag_list(dep_tag)
        return bucket

    def _inject_notification_configuration(self, function, bucket):
        # Base mapping shared by every event type; Filter is optional.
        base_event_mapping = {
            'Function': function.get_runtime_attr("arn")
        }

        if self.Filter is not None:
            base_event_mapping['Filter'] = self.Filter

        event_types = self.Events
        if isinstance(self.Events, string_types):
            event_types = [self.Events]

        event_mappings = []
        for event_type in event_types:

            lambda_event = copy.deepcopy(base_event_mapping)
            lambda_event['Event'] = event_type
            if CONDITION in function.resource_attributes:
                lambda_event = make_conditional(function.resource_attributes[CONDITION], lambda_event)
            event_mappings.append(lambda_event)

        properties = bucket.get('Properties', None)
        if properties is None:
            properties = {}
            bucket['Properties'] = properties

        notification_config = properties.get('NotificationConfiguration', None)
        if notification_config is None:
            notification_config = {}
            properties['NotificationConfiguration'] = notification_config

        lambda_notifications = notification_config.get('LambdaConfigurations', None)
        if lambda_notifications is None:
            lambda_notifications = []
            notification_config['LambdaConfigurations'] = lambda_notifications

        for event_mapping in event_mappings:
            if event_mapping not in lambda_notifications:
                lambda_notifications.append(event_mapping)
        return bucket
class SamApi(SamResourceMacro):
    """SAM rest API macro."""
    resource_type = 'AWS::Serverless::Api'
    property_types = {
        # Internal property set only by Implicit API plugin. If set to True, the API Event Source code will inject
        # Lambda Integration URI to the Swagger. To preserve backwards compatibility, this must be set only for
        # Implicit APIs. For Explicit APIs, customer is expected to set integration URI themselves.
        # In the future, we might rename and expose this property to customers so they can have SAM manage Explicit APIs
        # Swagger.
        '__MANAGE_SWAGGER': PropertyType(False, is_type(bool)),

        'Name': PropertyType(False, one_of(is_str(), is_type(dict))),
        'StageName': PropertyType(True, one_of(is_str(), is_type(dict))),
        'Tags': PropertyType(False, is_type(dict)),
        'DefinitionBody': PropertyType(False, is_type(dict)),
        'DefinitionUri': PropertyType(False, one_of(is_str(), is_type(dict))),
        'CacheClusterEnabled': PropertyType(False, is_type(bool)),
        'CacheClusterSize': PropertyType(False, is_str()),
        'Variables': PropertyType(False, is_type(dict)),
        'EndpointConfiguration': PropertyType(False, is_str()),
        'MethodSettings': PropertyType(False, is_type(list)),
        'BinaryMediaTypes': PropertyType(False, is_type(list)),
        'MinimumCompressionSize': PropertyType(False, is_type(int)),
        'Cors': PropertyType(False, one_of(is_str(), is_type(dict))),
        'Auth': PropertyType(False, is_type(dict)),
        'GatewayResponses': PropertyType(False, is_type(dict)),
        'AccessLogSetting': PropertyType(False, is_type(dict)),
        'CanarySetting': PropertyType(False, is_type(dict)),
        'TracingEnabled': PropertyType(False, is_type(bool)),
        'OpenApiVersion': PropertyType(False, is_str()),
        'Models': PropertyType(False, is_type(dict)),
        'Domain': PropertyType(False, is_type(dict))
    }

    referable_properties = {
        "Stage": ApiGatewayStage.resource_type,
        "Deployment": ApiGatewayDeployment.resource_type,
        "DomainName": ApiGatewayDomainName.resource_type
    }

    def to_cloudformation(self, **kwargs):
        """Returns the API Gateway RestApi, Deployment, and Stage to which this SAM Api corresponds.

        :param dict kwargs: already-converted resources that may need to be modified when converting this \
        macro to pure CloudFormation
        :returns: a list of vanilla CloudFormation Resources, to which this Function expands
        :rtype: list
        """
        resolver = kwargs["intrinsics_resolver"]

        # Resolve any template-parameter references before handing properties to the generator.
        self.BinaryMediaTypes = resolver.resolve_parameter_refs(self.BinaryMediaTypes)
        self.Domain = resolver.resolve_parameter_refs(self.Domain)

        generator = ApiGenerator(self.logical_id,
                                 self.CacheClusterEnabled,
                                 self.CacheClusterSize,
                                 self.Variables,
                                 self.depends_on,
                                 self.DefinitionBody,
                                 self.DefinitionUri,
                                 self.Name,
                                 self.StageName,
                                 tags=self.Tags,
                                 endpoint_configuration=self.EndpointConfiguration,
                                 method_settings=self.MethodSettings,
                                 binary_media=self.BinaryMediaTypes,
                                 minimum_compression_size=self.MinimumCompressionSize,
                                 cors=self.Cors,
                                 auth=self.Auth,
                                 gateway_responses=self.GatewayResponses,
                                 access_log_setting=self.AccessLogSetting,
                                 canary_setting=self.CanarySetting,
                                 tracing_enabled=self.TracingEnabled,
                                 resource_attributes=self.resource_attributes,
                                 passthrough_resource_attributes=self.get_passthrough_resource_attributes(),
                                 open_api_version=self.OpenApiVersion,
                                 models=self.Models,
                                 domain=self.Domain)

        (rest_api, deployment, stage, permissions,
         domain, basepath_mapping, route53) = generator.to_cloudformation()

        # Core resources always exist; domain/basepath/route53 are only emitted when configured.
        generated = [rest_api, deployment, stage]
        generated.extend(permissions)
        if domain:
            generated.append(domain)
        if basepath_mapping:
            generated.extend(basepath_mapping)
        if route53:
            generated.append(route53)

        return generated
class SamLayerVersion(SamResourceMacro):
    """ SAM Layer macro """
    resource_type = 'AWS::Serverless::LayerVersion'
    property_types = {
        'LayerName': PropertyType(False, one_of(is_str(), is_type(dict))),
        'Description': PropertyType(False, is_str()),
        'ContentUri': PropertyType(True, one_of(is_str(), is_type(dict))),
        'CompatibleRuntimes': PropertyType(False, list_of(is_str())),
        'LicenseInfo': PropertyType(False, is_str()),
        'RetentionPolicy': PropertyType(False, is_str())
    }

    RETAIN = 'Retain'
    DELETE = 'Delete'
    retention_policy_options = [RETAIN.lower(), DELETE.lower()]

    def to_cloudformation(self, **kwargs):
        """Returns the Lambda layer to which this SAM Layer corresponds.

        :param dict kwargs: already-converted resources that may need to be modified when converting this \
        macro to pure CloudFormation
        :returns: a list of vanilla CloudFormation Resources, to which this Function expands
        :rtype: list
        """
        resources = []

        # Append any CFN resources:
        intrinsics_resolver = kwargs["intrinsics_resolver"]
        resources.append(self._construct_lambda_layer(intrinsics_resolver))

        return resources

    def _construct_lambda_layer(self, intrinsics_resolver):
        """Constructs and returns the Lambda layer.

        :param samtranslator.intrinsics.resolver.IntrinsicsResolver intrinsics_resolver: resolver used to turn
            template-parameter references in string properties into concrete values
        :returns: the Lambda layer resource
        :rtype: model.lambda_.LambdaLayerVersion
        """
        # Resolve intrinsics if applicable:
        self.LayerName = self._resolve_string_parameter(intrinsics_resolver, self.LayerName, 'LayerName')
        self.LicenseInfo = self._resolve_string_parameter(intrinsics_resolver, self.LicenseInfo, 'LicenseInfo')
        self.Description = self._resolve_string_parameter(intrinsics_resolver, self.Description, 'Description')
        self.RetentionPolicy = self._resolve_string_parameter(intrinsics_resolver, self.RetentionPolicy,
                                                              'RetentionPolicy')

        retention_policy_value = self._get_retention_policy_value()

        attributes = self.get_passthrough_resource_attributes()
        if attributes is None:
            attributes = {}
        attributes['DeletionPolicy'] = retention_policy_value

        old_logical_id = self.logical_id
        # Hash the resource dict into the logical id so any content change yields a new id (see comment below).
        new_logical_id = logical_id_generator.LogicalIdGenerator(old_logical_id, self.to_dict()).gen()
        self.logical_id = new_logical_id

        lambda_layer = LambdaLayerVersion(self.logical_id, depends_on=self.depends_on, attributes=attributes)

        # Changing the LayerName property: when a layer is published, it is given an Arn
        # example: arn:aws:lambda:us-west-2:123456789012:layer:MyLayer:1
        # where MyLayer is the LayerName property if it exists; otherwise, it is the
        # LogicalId of this resource. Since a LayerVersion is an immutable resource, when
        # CloudFormation updates this resource, it will ALWAYS create a new version then
        # delete the old version if the logical ids match. What this does is change the
        # logical id of every layer (so a `DeletionPolicy: Retain` can work) and set the
        # LayerName property of the layer so that the Arn will still always be the same
        # with the exception of an incrementing version number.
        if not self.LayerName:
            self.LayerName = old_logical_id

        lambda_layer.LayerName = self.LayerName
        lambda_layer.Description = self.Description
        lambda_layer.Content = construct_s3_location_object(self.ContentUri, self.logical_id, 'ContentUri')
        lambda_layer.CompatibleRuntimes = self.CompatibleRuntimes
        lambda_layer.LicenseInfo = self.LicenseInfo

        return lambda_layer

    def _get_retention_policy_value(self):
        """Returns the value to set as this resource's DeletionPolicy attribute.

        Defaults to 'Retain' when RetentionPolicy is not provided.

        :return: value for the DeletionPolicy attribute.
        :raises InvalidResourceException: if RetentionPolicy is not one of the supported options, or is an
            unresolved intrinsic (non-string) value
        """
        if self.RetentionPolicy is None:
            return self.RETAIN

        # RetentionPolicy may still be a dict if it was an intrinsic that could not be resolved to a string;
        # calling .lower() on it would raise AttributeError, so surface a clean validation error instead.
        if not isinstance(self.RetentionPolicy, string_types) or \
                self.RetentionPolicy.lower() not in self.retention_policy_options:
            raise InvalidResourceException(self.logical_id,
                                           "'{}' must be one of the following options: {}."
                                           .format('RetentionPolicy', [self.RETAIN, self.DELETE]))

        if self.RetentionPolicy.lower() == self.RETAIN.lower():
            return self.RETAIN
        # Only remaining valid option is Delete.
        return self.DELETE
class SamFunction(SamResourceMacro):
    """SAM function macro."""

    # Constants for Tagging
    _SAM_KEY = "lambda:createdBy"
    _SAM_VALUE = "SAM"

    resource_type = 'AWS::Serverless::Function'
    property_types = {
        'FunctionName': PropertyType(False, one_of(is_str(), is_type(dict))),
        'Handler': PropertyType(True, is_str()),
        'Runtime': PropertyType(True, is_str()),
        'CodeUri': PropertyType(False, one_of(is_str(), is_type(dict))),
        'InlineCode': PropertyType(False, one_of(is_str(), is_type(dict))),
        'DeadLetterQueue': PropertyType(False, is_type(dict)),
        'Description': PropertyType(False, is_str()),
        'MemorySize': PropertyType(False, is_type(int)),
        'Timeout': PropertyType(False, is_type(int)),
        'VpcConfig': PropertyType(False, is_type(dict)),
        'Role': PropertyType(False, is_str()),
        # A policy entry may be a string (managed policy name/arn) or a dict (policy statement / intrinsic).
        'Policies': PropertyType(False, one_of(is_str(), list_of(one_of(is_str(), is_type(dict))))),
        'Environment': PropertyType(False, dict_of(is_str(), is_type(dict))),
        'Events': PropertyType(False, dict_of(is_str(), is_type(dict))),
        'Tags': PropertyType(False, is_type(dict)),
        'Tracing': PropertyType(False, one_of(is_type(dict), is_str())),
        'KmsKeyArn': PropertyType(False, one_of(is_type(dict), is_str())),
        'DeploymentPreference': PropertyType(False, is_type(dict)),
        'ReservedConcurrentExecutions': PropertyType(False, any_type()),

        # Intrinsic functions in value of Alias property are not supported, yet
        'AutoPublishAlias': PropertyType(False, one_of(is_str()))
    }
    event_resolver = ResourceTypeResolver(samtranslator.model.eventsources,
                                          samtranslator.model.eventsources.pull,
                                          samtranslator.model.eventsources.push,
                                          samtranslator.model.eventsources.cloudwatchlogs)

    # DeadLetterQueue
    dead_letter_queue_policy_actions = {'SQS': 'sqs:SendMessage', 'SNS': 'sns:Publish'}

    # Customers can refer to the following properties of SAM function
    referable_properties = {
        "Alias": LambdaAlias.resource_type,
        "Version": LambdaVersion.resource_type,
    }

    def resources_to_link(self, resources):
        """Returns the event resources this function's Events reference in the template.

        :param dict resources: other template resources the events may link to
        :rtype: dict
        :raises InvalidResourceException: if any event definition is invalid
        """
        try:
            return {
                'event_resources': self._event_resources_to_link(resources)
            }
        except InvalidEventException as e:
            raise InvalidResourceException(self.logical_id, e.message)

    def to_cloudformation(self, **kwargs):
        """Returns the Lambda function, role, and event resources to which this SAM Function corresponds.

        :param dict kwargs: already-converted resources that may need to be modified when converting this \
        macro to pure CloudFormation
        :returns: a list of vanilla CloudFormation Resources, to which this Function expands
        :rtype: list
        """
        resources = []
        intrinsics_resolver = kwargs["intrinsics_resolver"]

        if self.DeadLetterQueue:
            self._validate_dlq()

        lambda_function = self._construct_lambda_function()
        resources.append(lambda_function)

        lambda_alias = None
        if self.AutoPublishAlias:
            alias_name = self._get_resolved_alias_name("AutoPublishAlias", self.AutoPublishAlias, intrinsics_resolver)
            lambda_version = self._construct_version(lambda_function, intrinsics_resolver=intrinsics_resolver)
            lambda_alias = self._construct_alias(alias_name, lambda_function, lambda_version)
            resources.append(lambda_version)
            resources.append(lambda_alias)

        if self.DeploymentPreference:
            self._validate_deployment_preference_and_add_update_policy(
                kwargs.get('deployment_preference_collection', None),
                lambda_alias, intrinsics_resolver)

        managed_policy_map = kwargs.get('managed_policy_map', {})
        if not managed_policy_map:
            raise Exception('Managed policy map is empty, but should not be.')

        execution_role = None
        if lambda_function.Role is None:
            # Customer did not provide a Role; generate an execution role for them.
            execution_role = self._construct_role(managed_policy_map)
            lambda_function.Role = execution_role.get_runtime_attr('arn')
            resources.append(execution_role)

        try:
            resources += self._generate_event_resources(lambda_function, execution_role, kwargs['event_resources'],
                                                        lambda_alias=lambda_alias)
        except InvalidEventException as e:
            raise InvalidResourceException(self.logical_id, e.message)

        return resources

    def _get_resolved_alias_name(self, property_name, original_alias_value, intrinsics_resolver):
        """
        Alias names can be supplied as an intrinsic function. This method tries to extract alias name from a reference
        to a parameter. If it cannot completely resolve (ie. if a complex intrinsic function was used), then this
        method raises an exception. If alias name is just a plain string, it will return as is

        :param string property_name: Name of the property whose value is being resolved (used in error message)
        :param dict or string original_alias_value: Value of Alias property as provided by the customer
        :param samtranslator.intrinsics.resolver.IntrinsicsResolver intrinsics_resolver: Instance of the resolver that
            knows how to resolve parameter references
        :return string: Alias name
        :raises InvalidResourceException: If the value is a complex intrinsic function that cannot be resolved
        """

        # Try to resolve.
        resolved_alias_name = intrinsics_resolver.resolve_parameter_refs(original_alias_value)

        if not isinstance(resolved_alias_name, string_types):
            # This is still a dictionary which means we are not able to completely resolve intrinsics
            raise InvalidResourceException(self.logical_id,
                                           "'{}' must be a string or a Ref to a template parameter"
                                           .format(property_name))

        return resolved_alias_name

    def _construct_lambda_function(self):
        """Constructs and returns the Lambda function.

        :returns: the generated Lambda function resource
        :rtype: model.lambda_.LambdaFunction
        """
        lambda_function = LambdaFunction(self.logical_id, depends_on=self.depends_on)

        if self.FunctionName:
            lambda_function.FunctionName = self.FunctionName

        lambda_function.Handler = self.Handler
        lambda_function.Runtime = self.Runtime
        lambda_function.Description = self.Description
        lambda_function.MemorySize = self.MemorySize
        lambda_function.Timeout = self.Timeout
        lambda_function.VpcConfig = self.VpcConfig
        lambda_function.Role = self.Role
        lambda_function.Environment = self.Environment
        lambda_function.Code = self._construct_code_dict()
        lambda_function.KmsKeyArn = self.KmsKeyArn
        lambda_function.ReservedConcurrentExecutions = self.ReservedConcurrentExecutions

        lambda_function.Tags = self._contruct_tag_list()

        if self.Tracing:
            lambda_function.TracingConfig = {"Mode": self.Tracing}

        if self.DeadLetterQueue:
            lambda_function.DeadLetterConfig = {"TargetArn": self.DeadLetterQueue['TargetArn']}

        return lambda_function

    # NOTE: name keeps its historical misspelling ("contruct") for backwards compatibility with any
    # external callers/tests referencing it.
    def _contruct_tag_list(self):
        """Builds the Tags list for the Lambda function, always prepending the SAM-created-by tag.

        :returns: list of tag dicts
        :raises InvalidResourceException: if the customer supplied the reserved SAM tag key
        """
        if not bool(self.Tags):
            self.Tags = {}

        if self._SAM_KEY in self.Tags:
            raise InvalidResourceException(self.logical_id,
                                           self._SAM_KEY + " is a reserved Tag key name and "
                                                           "cannot be set on your function. "
                                                           "Please change the tag key in the input.")
        sam_tag = {self._SAM_KEY: self._SAM_VALUE}

        # To maintain backwards compatibility with previous implementation, we *must* append SAM tag to the start of
        # the tags list. Changing this ordering will trigger a update on Lambda Function resource. Even though this
        # does not change the actual content of the tags, we don't want to trigger update of a resource without
        # customer's knowledge.
        return get_tag_list(sam_tag) + get_tag_list(self.Tags)

    def _construct_role(self, managed_policy_map):
        """Constructs a Lambda execution role based on this SAM function's Policies property.

        :param dict managed_policy_map: mapping of managed policy name to ARN, used to translate policy names
        :returns: the generated IAM Role
        :rtype: model.iam.IAMRole
        """
        execution_role = IAMRole(self.logical_id + 'Role')
        execution_role.AssumeRolePolicyDocument = IAMRolePolicies.lambda_assume_role_policy()

        managed_policy_arns = [ArnGenerator.generate_aws_managed_policy_arn('service-role/AWSLambdaBasicExecutionRole')]
        if self.Tracing:
            managed_policy_arns.append(ArnGenerator.generate_aws_managed_policy_arn('AWSXrayWriteOnlyAccess'))

        function_policies = FunctionPolicies({"Policies": self.Policies},
                                             # No support for policy templates in the "core"
                                             policy_template_processor=None)
        policy_documents = []

        if self.DeadLetterQueue:
            policy_documents.append(IAMRolePolicies.dead_letter_queue_policy(
                self.dead_letter_queue_policy_actions[self.DeadLetterQueue['Type']],
                self.DeadLetterQueue['TargetArn']))

        for index, policy_entry in enumerate(function_policies.get()):

            if policy_entry.type is PolicyTypes.POLICY_STATEMENT:

                policy_documents.append({
                    'PolicyName': execution_role.logical_id + 'Policy' + str(index),
                    'PolicyDocument': policy_entry.data
                })
            elif policy_entry.type is PolicyTypes.MANAGED_POLICY:

                # There are three options:
                #   Managed Policy Name (string): Try to convert to Managed Policy ARN
                #   Managed Policy Arn (string): Insert it directly into the list
                #   Intrinsic Function (dict): Insert it directly into the list
                #
                # When you insert into managed_policy_arns list, de-dupe to prevent same ARN from showing up twice
                #

                policy_arn = policy_entry.data
                if isinstance(policy_entry.data, string_types) and policy_entry.data in managed_policy_map:
                    policy_arn = managed_policy_map[policy_entry.data]

                # De-Duplicate managed policy arns before inserting. Mainly useful
                # when customer specifies a managed policy which is already inserted
                # by SAM, such as AWSLambdaBasicExecutionRole
                if policy_arn not in managed_policy_arns:
                    managed_policy_arns.append(policy_arn)
            else:
                # Policy Templates are not supported here in the "core"
                raise InvalidResourceException(
                    self.logical_id,
                    "Policy at index {} in the 'Policies' property is not valid".format(index))

        execution_role.ManagedPolicyArns = list(managed_policy_arns)
        execution_role.Policies = policy_documents or None

        return execution_role

    def _validate_dlq(self):
        """Validates that the DeadLetterQueue property has the required keys and a supported Type.

        :raise: InvalidResourceException
        """
        # Validate required logical ids
        valid_dlq_types = str(list(self.dead_letter_queue_policy_actions.keys()))
        if not self.DeadLetterQueue.get('Type') or not self.DeadLetterQueue.get('TargetArn'):
            raise InvalidResourceException(self.logical_id,
                                           "'DeadLetterQueue' requires Type and TargetArn properties to be specified")

        # Validate required Types
        if self.DeadLetterQueue['Type'] not in self.dead_letter_queue_policy_actions:
            raise InvalidResourceException(self.logical_id,
                                           "'DeadLetterQueue' requires Type of {}".format(valid_dlq_types))

    def _event_resources_to_link(self, resources):
        """Resolves each event in self.Events to its event-source class and collects linkable resources.

        :param dict resources: other template resources the events may link to
        :rtype: dict mapping event logical id to its linked resources
        """
        event_resources = {}
        if self.Events:
            for logical_id, event_dict in self.Events.items():
                event_source = self.event_resolver.resolve_resource_type(event_dict).from_dict(
                    self.logical_id + logical_id, event_dict, logical_id)
                event_resources[logical_id] = event_source.resources_to_link(resources)
        return event_resources

    def _generate_event_resources(self, lambda_function, execution_role, event_resources, lambda_alias=None):
        """Generates and returns the resources associated with this function's events.

        :param model.lambda_.LambdaFunction lambda_function: generated Lambda function
        :param iam.IAMRole execution_role: generated Lambda execution role
        :param event_resources: All the event sources associated with this Lambda function
        :param model.lambda_.LambdaAlias lambda_alias: Optional Lambda Alias resource if we want to connect the
            event sources to this alias

        :returns: a list containing the function's event resources
        :rtype: list
        """
        resources = []
        if self.Events:
            for logical_id, event_dict in self.Events.items():
                eventsource = self.event_resolver.resolve_resource_type(event_dict).from_dict(
                    lambda_function.logical_id + logical_id, event_dict, logical_id)

                kwargs = {
                    # When Alias is provided, connect all event sources to the alias and *not* the function
                    'function': lambda_alias or lambda_function,
                    'role': execution_role,
                }

                for name, resource in event_resources[logical_id].items():
                    kwargs[name] = resource

                resources += eventsource.to_cloudformation(**kwargs)

        return resources

    def _construct_code_dict(self):
        """Builds the Lambda `Code` property from CodeUri or InlineCode.

        :rtype: dict
        :raises InvalidResourceException: if neither CodeUri nor InlineCode is provided
        """
        if self.CodeUri:
            return self._construct_code_dict_code_uri()
        elif self.InlineCode:
            return {
                "ZipFile": self.InlineCode
            }
        else:
            raise InvalidResourceException(self.logical_id, "Either 'InlineCode' or 'CodeUri' must be set")

    def _construct_code_dict_code_uri(self):
        """Constructs the Lambda function's `Code property`_, from the SAM function's CodeUri property.

        .. _Code property: \
        http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lambda-function-code.html

        :returns: a Code dict, containing the S3 Bucket, Key, and Version of the Lambda function code
        :rtype: dict
        """
        if isinstance(self.CodeUri, dict):
            if not self.CodeUri.get("Bucket", None) or not self.CodeUri.get("Key", None):
                # CodeUri is a dictionary but does not contain Bucket or Key property
                raise InvalidResourceException(self.logical_id,
                                               "'CodeUri' requires Bucket and Key properties to be specified")

            s3_pointer = self.CodeUri

        else:
            # CodeUri is NOT a dictionary. Parse it as a string
            s3_pointer = parse_s3_uri(self.CodeUri)

            if s3_pointer is None:
                raise InvalidResourceException(self.logical_id,
                                               '\'CodeUri\' is not a valid S3 Uri of the form '
                                               '"s3://bucket/key" with optional versionId query parameter.')

        code = {
            'S3Bucket': s3_pointer['Bucket'],
            'S3Key': s3_pointer['Key']
        }
        if 'Version' in s3_pointer:
            code['S3ObjectVersion'] = s3_pointer['Version']
        return code

    def _construct_version(self, function, intrinsics_resolver):
        """Constructs a Lambda Version resource that will be auto-published when CodeUri of the function changes.
        Old versions will not be deleted without a direct reference from the CloudFormation template.

        :param model.lambda_.LambdaFunction function: Lambda function object that is being connected to a version
        :param model.intrinsics.resolver.IntrinsicsResolver intrinsics_resolver: Class that can help resolve
            references to parameters present in CodeUri. It is a common usecase to set S3Key of Code to be a
            template parameter. Need to resolve the values otherwise we will never detect a change in Code dict
        :return: Lambda function Version resource
        """
        code_dict = function.Code
        if not code_dict:
            raise ValueError("Lambda function code must be a valid non-empty dictionary")

        if not intrinsics_resolver:
            raise ValueError("intrinsics_resolver is required for versions creation")

        # Resolve references to template parameters before creating hash. This will *not* resolve all intrinsics
        # because we cannot resolve runtime values like Arn of a resource. For purposes of detecting changes, this
        # is good enough. Here is why:
        #
        #   When using intrinsic functions there are two cases when has must change:
        #       - Value of the template parameter changes
        #       - (or) LogicalId of a referenced resource changes ie. !GetAtt NewResource.Arn
        #
        #   Later case will already change the hash because some value in the Code dictionary changes. We handle the
        #   first case by resolving references to template parameters. It is okay even if these references are
        #   present inside another intrinsic such as !Join. The resolver will replace the reference with the
        #   parameter's value and keep all other parts of !Join identical. This will still trigger a change in
        #   the hash.
        code_dict = intrinsics_resolver.resolve_parameter_refs(code_dict)

        # Construct the LogicalID of Lambda version by appending 10 characters of SHA of CodeUri. This is necessary
        # to trigger creation of a new version every time code location changes. Since logicalId changes,
        # CloudFormation will drop the old version and create a new one for us. We set a DeletionPolicy on the
        # version resource to prevent CloudFormation from actually deleting the underlying version resource
        #
        # SHA Collisions: For purposes of triggering a new update, we are concerned about just the difference
        # previous and next hashes. The chances that two subsequent hashes collide is fairly low.
        prefix = "{id}Version".format(id=self.logical_id)
        logical_id = logical_id_generator.LogicalIdGenerator(prefix, code_dict).gen()

        retain_old_versions = {
            "DeletionPolicy": "Retain"
        }

        lambda_version = LambdaVersion(logical_id=logical_id, attributes=retain_old_versions)
        lambda_version.FunctionName = function.get_runtime_attr('name')

        return lambda_version

    def _construct_alias(self, name, function, version):
        """Constructs a Lambda Alias for the given function and pointing to the given version

        :param string name: Name of the alias
        :param model.lambda_.LambdaFunction function: Lambda function object to associate the alias with
        :param model.lambda_.LambdaVersion version: Lambda version object to associate the alias with
        :return: Lambda alias object
        :rtype model.lambda_.LambdaAlias
        """
        if not name:
            raise ValueError("Alias name is required to create an alias")

        logical_id = "{id}Alias{suffix}".format(id=function.logical_id, suffix=name)
        alias = LambdaAlias(logical_id=logical_id)
        alias.Name = name
        alias.FunctionName = function.get_runtime_attr('name')
        alias.FunctionVersion = version.get_runtime_attr("version")

        return alias

    def _validate_deployment_preference_and_add_update_policy(self, deployment_preference_collection, lambda_alias,
                                                              intrinsics_resolver):
        """Validates the DeploymentPreference property and, when enabled, attaches the CodeDeploy UpdatePolicy
        to the auto-published alias.

        :param deployment_preference_collection: collection that accumulates per-function deployment preferences
        :param model.lambda_.LambdaAlias lambda_alias: alias to attach the UpdatePolicy to (required when enabled)
        :param intrinsics_resolver: resolver for parameter references inside the 'Enabled' flag
        :raises InvalidResourceException: if 'Enabled' cannot be resolved to a boolean, or AutoPublishAlias is missing
        :raises ValueError: if required collaborators are missing
        """
        if 'Enabled' in self.DeploymentPreference:
            # 'Enabled' may be a Ref to a template parameter; resolve it before type-checking.
            self.DeploymentPreference['Enabled'] = intrinsics_resolver.resolve_parameter_refs(
                self.DeploymentPreference['Enabled'])
            if isinstance(self.DeploymentPreference['Enabled'], dict):
                raise InvalidResourceException(self.logical_id, "'Enabled' must be a boolean value")

        if deployment_preference_collection is None:
            raise ValueError('deployment_preference_collection required for parsing the deployment preference')

        deployment_preference_collection.add(self.logical_id, self.DeploymentPreference)

        if deployment_preference_collection.get(self.logical_id).enabled:
            if self.AutoPublishAlias is None:
                raise InvalidResourceException(
                    self.logical_id,
                    "'DeploymentPreference' requires AutoPublishAlias property to be specified")
            if lambda_alias is None:
                raise ValueError('lambda_alias expected for updating it with the appropriate update policy')

            lambda_alias.set_resource_attribute("UpdatePolicy", deployment_preference_collection.update_policy(
                self.logical_id).to_dict())