class Cognito(PushEventSource):
    """Cognito UserPool event source for SAM Functions.

    Grants the UserPool permission to invoke the function and injects the
    function's ARN into the UserPool's ``LambdaConfig`` for each configured
    trigger.
    """

    resource_type = "Cognito"
    principal = "cognito-idp.amazonaws.com"
    property_types = {
        "UserPool": PropertyType(True, is_str()),
        # Trigger may be a single trigger name or a list of trigger names
        "Trigger": PropertyType(True, one_of(is_str(), list_of(is_str()))),
    }

    def resources_to_link(self, resources):
        """Resolve the UserPool reference to the actual resource in this template.

        :param dict resources: all resources in the template, keyed by logical id
        :returns: dict with the referenced userpool resource dict and its logical id
        :raises InvalidEventException: if UserPool is not a Ref to a UserPool
            resource in the same template
        """
        if isinstance(self.UserPool, dict) and "Ref" in self.UserPool:
            userpool_id = self.UserPool["Ref"]
            if userpool_id in resources:
                return {"userpool": resources[userpool_id], "userpool_id": userpool_id}
        raise InvalidEventException(
            self.relative_id, "Cognito events must reference a Cognito UserPool in the same template."
        )

    def to_cloudformation(self, **kwargs):
        """Returns the Lambda Permission resource allowing Cognito to invoke the
        function, plus the re-built UserPool resource carrying the LambdaConfig.

        :param dict kwargs: must contain 'function', 'userpool' and 'userpool_id'
        :returns: a list of vanilla CloudFormation Resources
        :rtype: list
        """
        function = kwargs.get("function")
        if not function:
            raise TypeError("Missing required keyword argument: function")

        if "userpool" not in kwargs or kwargs["userpool"] is None:
            raise TypeError("Missing required keyword argument: userpool")

        if "userpool_id" not in kwargs or kwargs["userpool_id"] is None:
            raise TypeError("Missing required keyword argument: userpool_id")

        userpool = kwargs["userpool"]
        userpool_id = kwargs["userpool_id"]

        resources = []
        source_arn = fnGetAtt(userpool_id, "Arn")
        resources.append(
            self._construct_permission(function, source_arn=source_arn, prefix=function.logical_id + "Cognito")
        )

        self._inject_lambda_config(function, userpool)
        resources.append(CognitoUserPool.from_dict(userpool_id, userpool))
        return resources

    def _inject_lambda_config(self, function, userpool):
        """Add this function's ARN to the userpool's LambdaConfig for each trigger.

        Mutates the ``userpool`` resource dict in place and also returns it.

        :raises InvalidEventException: if a trigger already has a function configured
        """
        event_triggers = self.Trigger
        if isinstance(self.Trigger, string_types):
            event_triggers = [self.Trigger]

        # TODO can these be conditional?

        properties = userpool.get("Properties", None)
        if properties is None:
            properties = {}
            userpool["Properties"] = properties

        lambda_config = properties.get("LambdaConfig", None)
        if lambda_config is None:
            lambda_config = {}
            properties["LambdaConfig"] = lambda_config

        for event_trigger in event_triggers:
            if event_trigger not in lambda_config:
                lambda_config[event_trigger] = function.get_runtime_attr("arn")
            else:
                # Report the specific duplicated trigger, not the whole Trigger
                # property (which may be a list and would render unhelpfully).
                raise InvalidEventException(
                    self.relative_id,
                    'Cognito trigger "{trigger}" defined multiple times.'.format(trigger=event_trigger),
                )
        return userpool
class LambdaFunction(Resource):
    """CloudFormation resource model for AWS::Lambda::Function.

    The first PropertyType argument marks whether the property is required;
    the second is the validator applied to the property value.
    """

    resource_type = "AWS::Lambda::Function"
    property_types = {
        "Code": PropertyType(True, is_type(dict)),
        "DeadLetterConfig": PropertyType(False, is_type(dict)),
        "Description": PropertyType(False, is_str()),
        "FunctionName": PropertyType(False, is_str()),
        "Handler": PropertyType(True, is_str()),
        "MemorySize": PropertyType(False, is_type(int)),
        "Role": PropertyType(False, is_str()),
        "Runtime": PropertyType(False, is_str()),
        "Timeout": PropertyType(False, is_type(int)),
        "VpcConfig": PropertyType(False, is_type(dict)),
        "Environment": PropertyType(False, is_type(dict)),
        "Tags": PropertyType(False, list_of(is_type(dict))),
        "TracingConfig": PropertyType(False, is_type(dict)),
        # May be a literal ARN string or an intrinsic (e.g. Fn::GetAtt dict)
        "KmsKeyArn": PropertyType(False, one_of(is_type(dict), is_str())),
        "Layers": PropertyType(False, list_of(one_of(is_str(), is_type(dict)))),
        "ReservedConcurrentExecutions": PropertyType(False, any_type()),
        "FileSystemConfigs": PropertyType(False, list_of(is_type(dict))),
    }

    # Intrinsic helpers used by callers to reference this function by
    # name (Ref) or by ARN (Fn::GetAtt).
    runtime_attrs = {
        "name": lambda self: ref(self.logical_id),
        "arn": lambda self: fnGetAtt(self.logical_id, "Arn")
    }
class DummyResource(Resource):
    """Minimal Resource subclass used to exercise property validation
    (one required and one optional property)."""

    resource_type = "AWS::Dummy::Resource"
    property_types = {
        "RequiredProperty": PropertyType(True, valid_if_true),
        "OptionalProperty": PropertyType(False, valid_if_true),
    }
class CloudWatchEvent(EventSource):
    """CloudWatch Events/EventBridge event source for SAM State Machine."""

    resource_type = "CloudWatchEvent"
    principal = "events.amazonaws.com"
    property_types = {
        "EventBusName": PropertyType(False, is_str()),
        "Pattern": PropertyType(False, is_type(dict)),
        "Input": PropertyType(False, is_str()),
        "InputPath": PropertyType(False, is_str()),
        "DeadLetterConfig": PropertyType(False, is_type(dict)),
        "RetryPolicy": PropertyType(False, is_type(dict)),
    }

    def to_cloudformation(self, resource, **kwargs):
        """Returns the CloudWatch Events/EventBridge Rule and IAM Role to which this
        CloudWatch Events/EventBridge event source corresponds.

        :param resource: the state machine resource the rule will target
        :param dict kwargs: no existing resources need to be modified
        :returns: a list of vanilla CloudFormation Resources, to which this CloudWatch
            Events/EventBridge event expands
        :rtype: list
        """
        resources = []

        permissions_boundary = kwargs.get("permissions_boundary")

        # Propagate pass-through attributes (e.g. Condition) from the state
        # machine onto every resource we generate.
        passthrough_resource_attributes = resource.get_passthrough_resource_attributes()
        events_rule = EventsRule(self.logical_id, attributes=passthrough_resource_attributes)
        events_rule.EventBusName = self.EventBusName
        events_rule.EventPattern = self.Pattern
        resources.append(events_rule)

        # Role that allows EventBridge to start executions of the state machine
        role = self._construct_role(resource, permissions_boundary)
        resources.append(role)

        source_arn = events_rule.get_runtime_attr("arn")
        dlq_queue_arn = None
        if self.DeadLetterConfig is not None:
            # Validate the DLQ config before creating/resolving its resources
            EventBridgeRuleUtils.validate_dlq_config(self.logical_id, self.DeadLetterConfig)
            dlq_queue_arn, dlq_resources = EventBridgeRuleUtils.get_dlq_queue_arn_and_resources(
                self, source_arn, passthrough_resource_attributes
            )
            resources.extend(dlq_resources)

        events_rule.Targets = [self._construct_target(resource, role, dlq_queue_arn)]

        return resources

    def _construct_target(self, resource, role, dead_letter_queue_arn=None):
        """Constructs the Target property for the CloudWatch Events/EventBridge Rule.

        :param resource: the state machine resource being targeted
        :param role: the IAM role EventBridge assumes to start the execution
        :param dead_letter_queue_arn: ARN of the DLQ, only set when DeadLetterConfig is given
        :returns: the Target property
        :rtype: dict
        """
        target = {
            "Arn": resource.get_runtime_attr("arn"),
            "Id": self.logical_id + "StepFunctionsTarget",
            "RoleArn": role.get_runtime_attr("arn"),
        }
        if self.Input is not None:
            target["Input"] = self.Input

        if self.InputPath is not None:
            target["InputPath"] = self.InputPath

        if self.DeadLetterConfig is not None:
            target["DeadLetterConfig"] = {"Arn": dead_letter_queue_arn}

        if self.RetryPolicy is not None:
            target["RetryPolicy"] = self.RetryPolicy

        return target
class Api(EventSource):
    """Api method event source for SAM State Machines."""

    resource_type = "Api"
    principal = "apigateway.amazonaws.com"
    property_types = {
        "Path": PropertyType(True, is_str()),
        "Method": PropertyType(True, is_str()),
        # Api Event sources must "always" be paired with a Serverless::Api
        "RestApiId": PropertyType(True, is_str()),
        "Stage": PropertyType(False, is_str()),
        "Auth": PropertyType(False, is_type(dict)),
    }

    def resources_to_link(self, resources):
        """
        If this API Event Source refers to an explicit API resource, resolve the reference and grab
        necessary data from the explicit API
        """
        rest_api_id = self.RestApiId
        if isinstance(rest_api_id, dict) and "Ref" in rest_api_id:
            rest_api_id = rest_api_id["Ref"]

        # If RestApiId is a resource in the same template, then we try find the StageName by following the reference
        # Otherwise we default to a wildcard. This stage name is solely used to construct the permission to
        # allow this stage to invoke the State Machine. If we are unable to resolve the stage name, we will
        # simply permit all stages to invoke this State Machine
        # This hack is necessary because customers could use !ImportValue, !Ref or other intrinsic functions which
        # can be sometimes impossible to resolve (ie. when it has cross-stack references)
        permitted_stage = "*"
        stage_suffix = "AllStages"
        explicit_api = None
        if isinstance(rest_api_id, string_types):
            if (
                rest_api_id in resources
                and "Properties" in resources[rest_api_id]
                and "StageName" in resources[rest_api_id]["Properties"]
            ):
                explicit_api = resources[rest_api_id]["Properties"]
                permitted_stage = explicit_api["StageName"]

                # Stage could be a intrinsic, in which case leave the suffix to default value
                if isinstance(permitted_stage, string_types):
                    stage_suffix = permitted_stage
                else:
                    stage_suffix = "Stage"
            else:
                # RestApiId is a string, not an intrinsic, but we did not find a valid API resource for this ID
                raise InvalidEventException(
                    self.relative_id,
                    "RestApiId property of Api event must reference a valid resource in the same template.",
                )

        return {"explicit_api": explicit_api, "explicit_api_stage": {"suffix": stage_suffix}}

    def to_cloudformation(self, resource, **kwargs):
        """If the Api event source has a RestApi property, then simply return the IAM role resource
        allowing API Gateway to start the state machine execution. If no RestApi is provided, then
        additionally inject the path, method, and the x-amazon-apigateway-integration into the
        Swagger body for a provided implicit API.

        :param model.stepfunctions.resources.StepFunctionsStateMachine resource; the state machine \
            resource to which the Api event source must be associated
        :param dict kwargs: a dict containing the implicit RestApi to be modified, should no \
            explicit RestApi be provided.
        :returns: a list of vanilla CloudFormation Resources, to which this Api event expands
        :rtype: list
        """
        resources = []

        intrinsics_resolver = kwargs.get("intrinsics_resolver")
        permissions_boundary = kwargs.get("permissions_boundary")

        if self.Method is not None:
            # Convert to lower case so that user can specify either GET or get
            self.Method = self.Method.lower()

        role = self._construct_role(resource, permissions_boundary)
        resources.append(role)

        explicit_api = kwargs["explicit_api"]
        # Only mutate the Swagger body when SAM owns the API definition
        if explicit_api.get("__MANAGE_SWAGGER"):
            self._add_swagger_integration(explicit_api, resource, role, intrinsics_resolver)

        return resources

    def _add_swagger_integration(self, api, resource, role, intrinsics_resolver):
        """Adds the path and method for this Api event source to the Swagger body for the provided RestApi.

        :param model.apigateway.ApiGatewayRestApi rest_api: the RestApi to which the path and method should be added.
        """
        swagger_body = api.get("DefinitionBody")
        if swagger_body is None:
            return

        resource_arn = resource.get_runtime_attr("arn")
        integration_uri = fnSub("arn:${AWS::Partition}:apigateway:${AWS::Region}:states:action/StartExecution")

        editor = SwaggerEditor(swagger_body)

        if editor.has_integration(self.Path, self.Method):
            # Cannot add the integration, if it is already present
            raise InvalidEventException(
                self.relative_id,
                'API method "{method}" defined multiple times for path "{path}".'.format(
                    method=self.Method, path=self.Path
                ),
            )

        condition = None
        if CONDITION in resource.resource_attributes:
            condition = resource.resource_attributes[CONDITION]

        editor.add_state_machine_integration(
            self.Path,
            self.Method,
            integration_uri,
            role.get_runtime_attr("arn"),
            self._generate_request_template(resource),
            condition=condition,
        )

        # Note: Refactor and combine the section below with the Api eventsource for functions
        if self.Auth:
            method_authorizer = self.Auth.get("Authorizer")
            api_auth = api.get("Auth")
            api_auth = intrinsics_resolver.resolve_parameter_refs(api_auth)

            if method_authorizer:
                api_authorizers = api_auth and api_auth.get("Authorizers")

                # AWS_IAM is always allowed; anything else must exist in the API's Authorizers
                if method_authorizer != "AWS_IAM":
                    if method_authorizer != "NONE" and not api_authorizers:
                        raise InvalidEventException(
                            self.relative_id,
                            "Unable to set Authorizer [{authorizer}] on API method [{method}] for path [{path}] "
                            "because the related API does not define any Authorizers.".format(
                                authorizer=method_authorizer, method=self.Method, path=self.Path
                            ),
                        )

                    if method_authorizer != "NONE" and not api_authorizers.get(method_authorizer):
                        raise InvalidEventException(
                            self.relative_id,
                            "Unable to set Authorizer [{authorizer}] on API method [{method}] for path [{path}] "
                            "because it wasn't defined in the API's Authorizers.".format(
                                authorizer=method_authorizer, method=self.Method, path=self.Path
                            ),
                        )

                    if method_authorizer == "NONE":
                        # NONE only makes sense as an opt-out from a DefaultAuthorizer
                        if not api_auth or not api_auth.get("DefaultAuthorizer"):
                            raise InvalidEventException(
                                self.relative_id,
                                "Unable to set Authorizer on API method [{method}] for path [{path}] because 'NONE' "
                                "is only a valid value when a DefaultAuthorizer on the API is specified.".format(
                                    method=self.Method, path=self.Path
                                ),
                            )

            if self.Auth.get("AuthorizationScopes") and not isinstance(self.Auth.get("AuthorizationScopes"), list):
                raise InvalidEventException(
                    self.relative_id,
                    "Unable to set Authorizer on API method [{method}] for path [{path}] because "
                    "'AuthorizationScopes' must be a list of strings.".format(method=self.Method, path=self.Path),
                )

            apikey_required_setting = self.Auth.get("ApiKeyRequired")
            # Explicit False only allowed when the API itself has ApiKeyRequired to override
            apikey_required_setting_is_false = apikey_required_setting is not None and not apikey_required_setting
            if apikey_required_setting_is_false and (not api_auth or not api_auth.get("ApiKeyRequired")):
                raise InvalidEventException(
                    self.relative_id,
                    "Unable to set ApiKeyRequired [False] on API method [{method}] for path [{path}] "
                    "because the related API does not specify any ApiKeyRequired.".format(
                        method=self.Method, path=self.Path
                    ),
                )

            if method_authorizer or apikey_required_setting is not None:
                editor.add_auth_to_method(api=api, path=self.Path, method_name=self.Method, auth=self.Auth)

            if self.Auth.get("ResourcePolicy"):
                resource_policy = self.Auth.get("ResourcePolicy")
                editor.add_resource_policy(
                    resource_policy=resource_policy, path=self.Path, api_id=self.RestApiId.get("Ref"), stage=self.Stage
                )
                if resource_policy.get("CustomStatements"):
                    editor.add_custom_statements(resource_policy.get("CustomStatements"))

        api["DefinitionBody"] = editor.swagger

    def _generate_request_template(self, resource):
        """Generates the Body mapping request template for the Api. This allows for the input
        request to the Api to be passed as the execution input to the associated state machine resource.

        :param model.stepfunctions.resources.StepFunctionsStateMachine resource; the state machine
            resource to which the Api event source must be associated
        :returns: a body mapping request which passes the Api input to the state machine execution
        :rtype: dict
        """
        request_templates = {
            "application/json": fnSub(
                json.dumps(
                    {
                        "input": "$util.escapeJavaScript($input.json('$'))",
                        "stateMachineArn": "${" + resource.logical_id + "}",
                    }
                )
            )
        }
        return request_templates
class CognitoUserPool(Resource):
    """CloudFormation resource model for AWS::Cognito::UserPool."""

    resource_type = "AWS::Cognito::UserPool"
    property_types = {
        "AdminCreateUserConfig": PropertyType(False, is_type(dict)),
        "AliasAttributes": PropertyType(False, list_of(is_str())),
        "AutoVerifiedAttributes": PropertyType(False, list_of(is_str())),
        "DeviceConfiguration": PropertyType(False, is_type(dict)),
        "EmailConfiguration": PropertyType(False, is_type(dict)),
        "EmailVerificationMessage": PropertyType(False, is_str()),
        "EmailVerificationSubject": PropertyType(False, is_str()),
        "LambdaConfig": PropertyType(False, is_type(dict)),
        "MfaConfiguration": PropertyType(False, is_str()),
        "Policies": PropertyType(False, is_type(dict)),
        # list_of expects a validator callable, not a raw type: use
        # is_type(dict) like every other dict-valued property in this file.
        "Schema": PropertyType(False, list_of(is_type(dict))),
        "SmsAuthenticationMessage": PropertyType(False, is_str()),
        "SmsConfiguration": PropertyType(False, list_of(is_type(dict))),
        "SmsVerificationMessage": PropertyType(False, is_str()),
        "UsernameAttributes": PropertyType(False, list_of(is_str())),
        "UserPoolAddOns": PropertyType(False, list_of(is_type(dict))),
        "UserPoolName": PropertyType(False, is_str()),
        "UserPoolTags": PropertyType(False, is_type(dict)),
        "VerificationMessageTemplate": PropertyType(False, is_type(dict)),
    }

    # Intrinsic helpers: Ref gives the pool id; GetAtt exposes Arn and the
    # provider name/URL attributes.
    runtime_attrs = {
        "name": lambda self: ref(self.logical_id),
        "arn": lambda self: fnGetAtt(self.logical_id, "Arn"),
        "provider_name": lambda self: fnGetAtt(self.logical_id, "ProviderName"),
        "provider_url": lambda self: fnGetAtt(self.logical_id, "ProviderURL"),
    }
class SamSimpleTable(SamResourceMacro):
    """SAM simple table macro.

    Expands AWS::Serverless::SimpleTable into an AWS::DynamoDB::Table with a
    single hash key and (defaulted) provisioned throughput.
    """

    resource_type = 'AWS::Serverless::SimpleTable'
    property_types = {
        'PrimaryKey': PropertyType(False, dict_of(is_str(), is_str())),
        # Values may be ints or intrinsic dicts (e.g. !Ref to a parameter)
        'ProvisionedThroughput': PropertyType(False, dict_of(is_str(), one_of(is_type(int), is_type(dict)))),
        'TableName': PropertyType(False, one_of(is_str(), is_type(dict))),
        'Tags': PropertyType(False, is_type(dict))
    }

    # Maps the user-facing attribute type names to DynamoDB's short type codes
    attribute_type_conversions = {'String': 'S', 'Number': 'N', 'Binary': 'B'}

    def to_cloudformation(self, **kwargs):
        """Returns the DynamoDB table to which this SAM SimpleTable expands.

        :returns: a list containing the generated AWS::DynamoDB::Table resource
        :rtype: list
        """
        dynamodb_resources = self._construct_dynamodb_table()
        return [dynamodb_resources]

    def _construct_dynamodb_table(self):
        """Builds the DynamoDBTable resource, applying defaults for any
        omitted PrimaryKey or ProvisionedThroughput."""
        dynamodb_table = DynamoDBTable(self.logical_id, depends_on=self.depends_on)

        if self.PrimaryKey:
            primary_key = {
                'AttributeName': self.PrimaryKey['Name'],
                'AttributeType': self._convert_attribute_type(self.PrimaryKey['Type'])
            }
        else:
            # Default primary key when the template does not specify one
            primary_key = {'AttributeName': 'id', 'AttributeType': 'S'}

        dynamodb_table.AttributeDefinitions = [primary_key]
        dynamodb_table.KeySchema = [{
            'AttributeName': primary_key['AttributeName'],
            'KeyType': 'HASH'
        }]

        if self.ProvisionedThroughput:
            provisioned_throughput = self.ProvisionedThroughput
        else:
            provisioned_throughput = {'ReadCapacityUnits': 5, 'WriteCapacityUnits': 5}

        dynamodb_table.ProvisionedThroughput = provisioned_throughput

        if self.TableName:
            dynamodb_table.TableName = self.TableName

        # Plain truthiness check; the previous bool(...) wrapper was redundant
        if self.Tags:
            dynamodb_table.Tags = get_tag_list(self.Tags)

        return dynamodb_table

    def _convert_attribute_type(self, attribute_type):
        """Translates a SimpleTable attribute type name to the DynamoDB code.

        :raises InvalidResourceException: for an unsupported attribute type
        """
        if attribute_type in self.attribute_type_conversions:
            return self.attribute_type_conversions[attribute_type]
        raise InvalidResourceException(
            self.logical_id,
            'Invalid \'Type\' "{actual}".'.format(actual=attribute_type))
class CognitoUserPool(Resource):
    # NOTE(review): this class is also defined earlier in this file; the two
    # definitions should stay in sync (or be deduplicated).
    """CloudFormation resource model for AWS::Cognito::UserPool."""

    resource_type = 'AWS::Cognito::UserPool'
    property_types = {
        'AdminCreateUserConfig': PropertyType(False, is_type(dict)),
        'AliasAttributes': PropertyType(False, list_of(is_str())),
        'AutoVerifiedAttributes': PropertyType(False, list_of(is_str())),
        'DeviceConfiguration': PropertyType(False, is_type(dict)),
        'EmailConfiguration': PropertyType(False, is_type(dict)),
        'EmailVerificationMessage': PropertyType(False, is_str()),
        'EmailVerificationSubject': PropertyType(False, is_str()),
        'LambdaConfig': PropertyType(False, is_type(dict)),
        'MfaConfiguration': PropertyType(False, is_str()),
        'Policies': PropertyType(False, is_type(dict)),
        # list_of expects a validator callable, not a raw type: use
        # is_type(dict) like every other dict-valued property in this file.
        'Schema': PropertyType(False, list_of(is_type(dict))),
        'SmsAuthenticationMessage': PropertyType(False, is_str()),
        'SmsConfiguration': PropertyType(False, list_of(is_type(dict))),
        'SmsVerificationMessage': PropertyType(False, is_str()),
        'UsernameAttributes': PropertyType(False, list_of(is_str())),
        'UserPoolAddOns': PropertyType(False, list_of(is_type(dict))),
        'UserPoolName': PropertyType(False, is_str()),
        # UserPoolTags is a map of tag key/value pairs per the CloudFormation
        # spec, and the sibling definition of this class validates it as a
        # dict; is_str() here was inconsistent.
        'UserPoolTags': PropertyType(False, is_type(dict)),
        'VerificationMessageTemplate': PropertyType(False, is_type(dict))
    }

    # Intrinsic helpers: Ref gives the pool id; GetAtt exposes Arn and the
    # provider name/URL attributes.
    runtime_attrs = {
        "name": lambda self: ref(self.logical_id),
        "arn": lambda self: fnGetAtt(self.logical_id, "Arn"),
        "provider_name": lambda self: fnGetAtt(self.logical_id, "ProviderName"),
        "provider_url": lambda self: fnGetAtt(self.logical_id, "ProviderURL")
    }
class ApiGatewayStage(Resource):
    """CloudFormation resource model for AWS::ApiGateway::Stage."""

    resource_type = 'AWS::ApiGateway::Stage'
    property_types = {
        'AccessLogSetting': PropertyType(False, is_type(dict)),
        'CacheClusterEnabled': PropertyType(False, is_type(bool)),
        'CacheClusterSize': PropertyType(False, is_str()),
        'CanarySetting': PropertyType(False, is_type(dict)),
        'ClientCertificateId': PropertyType(False, is_str()),
        'DeploymentId': PropertyType(True, is_str()),
        'Description': PropertyType(False, is_str()),
        'RestApiId': PropertyType(True, is_str()),
        # StageName may be a literal string or an intrinsic dict
        'StageName': PropertyType(True, one_of(is_str(), is_type(dict))),
        'TracingEnabled': PropertyType(False, is_type(bool)),
        'Variables': PropertyType(False, is_type(dict)),
        "MethodSettings": PropertyType(False, is_type(list))
    }

    # Ref on a Stage yields the stage name
    runtime_attrs = {
        "stage_name": lambda self: ref(self.logical_id),
    }

    def update_deployment_ref(self, deployment_logical_id):
        """Point this stage's DeploymentId at the given deployment resource."""
        self.DeploymentId = ref(deployment_logical_id)
class LambdaEventSourceMapping(Resource):
    """CloudFormation resource model for AWS::Lambda::EventSourceMapping."""

    resource_type = 'AWS::Lambda::EventSourceMapping'
    property_types = {
        'BatchSize': PropertyType(False, is_type(int)),
        'Enabled': PropertyType(False, is_type(bool)),
        'EventSourceArn': PropertyType(True, is_str()),
        'FunctionName': PropertyType(True, is_str()),
        'MaximumBatchingWindowInSeconds': PropertyType(False, is_type(int)),
        'MaximumRetryAttempts': PropertyType(False, is_type(int)),
        'BisectBatchOnFunctionError': PropertyType(False, is_type(bool)),
        'MaximumRecordAgeInSeconds': PropertyType(False, is_type(int)),
        'DestinationConfig': PropertyType(False, is_type(dict)),
        'ParallelizationFactor': PropertyType(False, is_type(int)),
        'StartingPosition': PropertyType(False, is_str())
    }

    # Ref on an EventSourceMapping yields its id
    runtime_attrs = {"name": lambda self: ref(self.logical_id)}
class DummyResource(Resource):
    """Minimal Resource subclass used to exercise property validation
    (one required and one optional property)."""

    resource_type = 'AWS::Dummy::Resource'
    property_types = {
        'RequiredProperty': PropertyType(True, valid_if_true),
        'OptionalProperty': PropertyType(False, valid_if_true)
    }
class S3Bucket(Resource):
    """CloudFormation resource model for AWS::S3::Bucket.

    Most properties are passed through unvalidated (any_type); only the ones
    this translator manipulates (e.g. NotificationConfiguration) are typed.
    """

    resource_type = 'AWS::S3::Bucket'
    property_types = {
        'AccessControl': PropertyType(False, any_type()),
        'AccelerateConfiguration': PropertyType(False, any_type()),
        'AnalyticsConfigurations': PropertyType(False, any_type()),
        'BucketEncryption': PropertyType(False, any_type()),
        'BucketName': PropertyType(False, is_str()),
        'CorsConfiguration': PropertyType(False, any_type()),
        'InventoryConfigurations': PropertyType(False, any_type()),
        'LifecycleConfiguration': PropertyType(False, any_type()),
        'LoggingConfiguration': PropertyType(False, any_type()),
        'MetricsConfigurations': PropertyType(False, any_type()),
        'NotificationConfiguration': PropertyType(False, is_type(dict)),
        'ReplicationConfiguration': PropertyType(False, any_type()),
        'Tags': PropertyType(False, is_type(list)),
        'VersioningConfiguration': PropertyType(False, any_type()),
        'WebsiteConfiguration': PropertyType(False, any_type()),
    }

    # Intrinsic helpers used by callers to reference this bucket by
    # name (Ref) or by ARN (Fn::GetAtt).
    runtime_attrs = {
        "name": lambda self: ref(self.logical_id),
        "arn": lambda self: fnGetAtt(self.logical_id, "Arn")
    }
class Api(PushEventSource):
    """Api method event source for SAM Functions."""

    resource_type = 'Api'
    principal = 'apigateway.amazonaws.com'
    property_types = {
        'Path': PropertyType(True, is_str()),
        'Method': PropertyType(True, is_str()),

        # Api Event sources must "always" be paired with a Serverless::Api
        'RestApiId': PropertyType(True, is_str())
    }

    def resources_to_link(self, resources):
        """
        If this API Event Source refers to an explicit API resource, resolve the reference and grab
        necessary data from the explicit API
        """
        rest_api_id = self.RestApiId
        if isinstance(rest_api_id, dict) and "Ref" in rest_api_id:
            rest_api_id = rest_api_id["Ref"]

        # If RestApiId is a resource in the same template, then we try find the StageName by following the reference
        # Otherwise we default to a wildcard. This stage name is solely used to construct the permission to
        # allow this stage to invoke the Lambda function. If we are unable to resolve the stage name, we will
        # simply permit all stages to invoke this Lambda function
        # This hack is necessary because customers could use !ImportValue, !Ref or other intrinsic functions which
        # can be sometimes impossible to resolve (ie. when it has cross-stack references)
        permitted_stage = "*"
        stage_suffix = "AllStages"
        explicit_api = None
        if isinstance(rest_api_id, string_types):
            if rest_api_id in resources \
                    and "Properties" in resources[rest_api_id] \
                    and "StageName" in resources[rest_api_id]["Properties"]:
                explicit_api = resources[rest_api_id]["Properties"]
                permitted_stage = explicit_api["StageName"]

                # Stage could be a intrinsic, in which case leave the suffix to default value
                if isinstance(permitted_stage, string_types):
                    stage_suffix = permitted_stage
                else:
                    stage_suffix = "Stage"
            else:
                # RestApiId is a string, not an intrinsic, but we did not find a valid API resource for this ID
                raise InvalidEventException(
                    self.relative_id,
                    "RestApiId property of Api event must reference a valid "
                    "resource in the same template.")

        return {
            'explicit_api': explicit_api,
            'explicit_api_stage': {
                'permitted_stage': permitted_stage,
                'suffix': stage_suffix
            }
        }

    def to_cloudformation(self, **kwargs):
        """If the Api event source has a RestApi property, then simply return the Lambda Permission resource allowing
        API Gateway to call the function. If no RestApi is provided, then additionally inject the path, method, and the
        x-amazon-apigateway-integration into the Swagger body for a provided implicit API.

        :param dict kwargs: a dict containing the implicit RestApi to be modified, should no explicit RestApi \
                be provided.
        :returns: a list of vanilla CloudFormation Resources, to which this Api event expands
        :rtype: list
        """
        resources = []

        function = kwargs.get('function')

        if not function:
            raise TypeError("Missing required keyword argument: function")

        if self.Method is not None:
            # Convert to lower case so that user can specify either GET or get
            self.Method = self.Method.lower()

        resources.extend(self._get_permissions(kwargs))

        explicit_api = kwargs['explicit_api']
        # Only mutate the Swagger body when SAM owns the API definition
        if explicit_api.get("__MANAGE_SWAGGER"):
            self._add_swagger_integration(explicit_api, function)

        return resources

    def _get_permissions(self, resources_to_link):
        """Builds the Lambda Permissions for the Test stage and for the
        permitted deployment stage of the API."""
        permissions = []

        permissions.append(self._get_permission(resources_to_link, "*", "Test"))

        # By default, implicit APIs get a stage called Prod. If the API event refers to an
        # explicit API using RestApiId property, we should grab the stage name of the explicit API
        permitted_stage = suffix = "Prod"
        if 'explicit_api_stage' in resources_to_link:
            permitted_stage = resources_to_link['explicit_api_stage']['permitted_stage']
            suffix = resources_to_link['explicit_api_stage']['suffix']

        permissions.append(self._get_permission(resources_to_link, permitted_stage, suffix))
        return permissions

    def _get_permission(self, resources_to_link, stage, suffix):
        """Constructs a Lambda Permission scoped to this path/method on the
        given API stage."""
        if not stage or not suffix:
            raise RuntimeError("Could not add permission to lambda function.")

        # API Gateway resource paths use '*' where the greedy proxy variable appears
        path = self.Path.replace('{proxy+}', '*')
        api_id = self.RestApiId

        # RestApiId can be a simple string or intrinsic function like !Ref. Using Fn::Sub will handle both cases
        resource = '${__ApiId__}/' + '${__Stage__}/' + self.Method.upper() + path

        partition = ArnGenerator.get_partition_name()
        source_arn = fnSub(
            ArnGenerator.generate_arn(partition=partition, service='execute-api', resource=resource),
            {"__ApiId__": api_id, "__Stage__": stage})

        return self._construct_permission(resources_to_link['function'], source_arn=source_arn, suffix=suffix)

    def _add_swagger_integration(self, api, function):
        """Adds the path and method for this Api event source to the Swagger body for the provided RestApi.

        :param model.apigateway.ApiGatewayRestApi rest_api: the RestApi to which the path and method should be added.
        """
        swagger_body = api.get("DefinitionBody")
        if swagger_body is None:
            return

        function_arn = function.get_runtime_attr('arn')
        partition = ArnGenerator.get_partition_name()
        uri = fnSub('arn:' + partition + ':apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/' +
                    make_shorthand(function_arn) + '/invocations')

        editor = SwaggerEditor(swagger_body)

        if editor.has_integration(self.Path, self.Method):
            # Cannot add the Lambda Integration, if it is already present
            raise InvalidEventException(
                self.relative_id,
                'API method "{method}" defined multiple times for path "{path}".'
                .format(method=self.Method, path=self.Path))

        editor.add_lambda_integration(self.Path, self.Method, uri)

        api["DefinitionBody"] = editor.swagger
class HttpApi(PushEventSource):
    """Api method event source for SAM Functions."""

    resource_type = "HttpApi"
    principal = "apigateway.amazonaws.com"
    property_types = {
        "Path": PropertyType(False, is_str()),
        "Method": PropertyType(False, is_str()),
        "ApiId": PropertyType(False, is_str()),
        "Stage": PropertyType(False, is_str()),
        "Auth": PropertyType(False, is_type(dict)),
    }

    def resources_to_link(self, resources):
        """
        If this API Event Source refers to an explicit API resource, resolve the reference and grab
        necessary data from the explicit API
        """
        api_id = self.ApiId
        if isinstance(api_id, dict) and "Ref" in api_id:
            api_id = api_id["Ref"]

        # NOTE(review): assumes api_id names a resource in this template; an
        # unresolvable ApiId raises KeyError here — confirm callers validate it.
        explicit_api = resources[api_id].get("Properties")

        return {"explicit_api": explicit_api}

    def to_cloudformation(self, **kwargs):
        """If the Api event source has a RestApi property, then simply return the Lambda Permission resource allowing
        API Gateway to call the function. If no RestApi is provided, then additionally inject the path, method, and the
        x-amazon-apigateway-integration into the OpenApi body for a provided implicit API.

        :param dict kwargs: a dict containing the implicit RestApi to be modified, should no explicit RestApi \
                be provided.
        :returns: a list of vanilla CloudFormation Resources, to which this Api event expands
        :rtype: list
        """
        resources = []

        # NOTE(review): unlike the Api event source, `function` is not guarded
        # against None here; a missing kwarg surfaces later as an
        # AttributeError rather than a TypeError — confirm this is intended.
        function = kwargs.get("function")

        if self.Method is not None:
            # Convert to lower case so that user can specify either GET or get
            self.Method = self.Method.lower()

        resources.extend(self._get_permissions(kwargs))

        explicit_api = kwargs["explicit_api"]
        self._add_openapi_integration(explicit_api, function, explicit_api.get("__MANAGE_SWAGGER"))

        return resources

    def _get_permissions(self, resources_to_link):
        """Builds the Lambda Permission list for this event (zero or one
        permission; none when a $default integration already covers it)."""
        permissions = []

        # Give permission to all stages by default
        permitted_stage = "*"

        permission = self._get_permission(resources_to_link, permitted_stage)
        if permission:
            permissions.append(permission)
        return permissions

    def _get_permission(self, resources_to_link, stage):
        """Constructs the Lambda Permission scoped to this path/method, or
        returns None when the $default integration already grants it."""
        # It turns out that APIGW doesn't like trailing slashes in paths (#665)
        # and removes as a part of their behaviour, but this isn't documented.
        # The regex removes the tailing slash to ensure the permission works as intended
        path = re.sub(r"^(.+)/$", r"\1", self.Path)

        editor = None
        if resources_to_link["explicit_api"].get("DefinitionBody"):
            try:
                editor = OpenApiEditor(resources_to_link["explicit_api"].get("DefinitionBody"))
            except ValueError as e:
                api_logical_id = self.ApiId.get("Ref") if isinstance(self.ApiId, dict) else self.ApiId
                raise InvalidResourceException(api_logical_id, e)

        # If this is using the new $default path, keep path blank and add a * permission
        if path == OpenApiEditor._DEFAULT_PATH:
            path = ""
        elif editor and resources_to_link.get("function").logical_id == editor.get_integration_function_logical_id(
            OpenApiEditor._DEFAULT_PATH, OpenApiEditor._X_ANY_METHOD
        ):
            # Case where default exists for this function, and so the permissions for that will apply here as well
            # This can save us several CFN resources (not duplicating permissions)
            return
        else:
            path = OpenApiEditor.get_path_without_trailing_slash(path)

        # Handle case where Method is already the ANY ApiGateway extension
        if self.Method.lower() == "any" or self.Method.lower() == OpenApiEditor._X_ANY_METHOD:
            method = "*"
        else:
            method = self.Method.upper()

        api_id = self.ApiId

        # ApiId can be a simple string or intrinsic function like !Ref. Using Fn::Sub will handle both cases
        resource = "${__ApiId__}/" + "${__Stage__}/" + method + path
        source_arn = fnSub(
            ArnGenerator.generate_arn(partition="${AWS::Partition}", service="execute-api", resource=resource),
            {"__ApiId__": api_id, "__Stage__": stage},
        )

        return self._construct_permission(resources_to_link["function"], source_arn=source_arn)

    def _add_openapi_integration(self, api, function, manage_swagger=False):
        """Adds the path and method for this Api event source to the OpenApi body for the provided RestApi.

        :param model.apigateway.ApiGatewayRestApi rest_api: the RestApi to which the path and method should be added.
        """
        open_api_body = api.get("DefinitionBody")
        if open_api_body is None:
            return

        function_arn = function.get_runtime_attr("arn")
        uri = fnSub(
            "arn:${AWS::Partition}:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/"
            + make_shorthand(function_arn)
            + "/invocations"
        )

        editor = OpenApiEditor(open_api_body)

        # Duplicate-integration check only applies when SAM manages the definition
        if manage_swagger and editor.has_integration(self.Path, self.Method):
            # Cannot add the Lambda Integration, if it is already present
            raise InvalidEventException(
                self.relative_id,
                "API method '{method}' defined multiple times for path '{path}'.".format(
                    method=self.Method, path=self.Path
                ),
            )

        condition = None
        if CONDITION in function.resource_attributes:
            condition = function.resource_attributes[CONDITION]

        editor.add_lambda_integration(self.Path, self.Method, uri, self.Auth, api.get("Auth"), condition=condition)
        if self.Auth:
            self._add_auth_to_openapi_integration(api, editor)
        api["DefinitionBody"] = editor.openapi

    def _add_auth_to_openapi_integration(self, api, editor):
        """Adds authorization to the lambda integration

        :param api: api object
        :param editor: OpenApiEditor object that contains the OpenApi definition
        """
        method_authorizer = self.Auth.get("Authorizer")
        api_auth = api.get("Auth")
        if not method_authorizer:
            if api_auth.get("DefaultAuthorizer"):
                # Fall back to the API's DefaultAuthorizer and record it on this event's Auth
                self.Auth["Authorizer"] = method_authorizer = api_auth.get("DefaultAuthorizer")
            else:
                # currently, we require either a default auth or auth in the method
                raise InvalidEventException(
                    self.relative_id,
                    "'Auth' section requires either "
                    "an explicit 'Authorizer' set or a 'DefaultAuthorizer' "
                    "configured on the HttpApi.",
                )

        # Default auth should already be applied, so apply any other auth here or scope override to default
        api_authorizers = api_auth and api_auth.get("Authorizers")

        if method_authorizer != "NONE" and not api_authorizers:
            raise InvalidEventException(
                self.relative_id,
                "Unable to set Authorizer [{authorizer}] on API method [{method}] for path [{path}] "
                "because the related API does not define any Authorizers.".format(
                    authorizer=method_authorizer, method=self.Method, path=self.Path
                ),
            )

        if method_authorizer != "NONE" and not api_authorizers.get(method_authorizer):
            raise InvalidEventException(
                self.relative_id,
                "Unable to set Authorizer [{authorizer}] on API method [{method}] for path [{path}] "
                "because it wasn't defined in the API's Authorizers.".format(
                    authorizer=method_authorizer, method=self.Method, path=self.Path
                ),
            )

        if method_authorizer == "NONE" and not api_auth.get("DefaultAuthorizer"):
            # NONE only makes sense as an opt-out from a DefaultAuthorizer
            raise InvalidEventException(
                self.relative_id,
                "Unable to set Authorizer on API method [{method}] for path [{path}] because 'NONE' "
                "is only a valid value when a DefaultAuthorizer on the API is specified.".format(
                    method=self.Method, path=self.Path
                ),
            )

        if self.Auth.get("AuthorizationScopes") and not isinstance(self.Auth.get("AuthorizationScopes"), list):
            raise InvalidEventException(
                self.relative_id,
                "Unable to set Authorizer on API method [{method}] for path [{path}] because "
                "'AuthorizationScopes' must be a list of strings.".format(method=self.Method, path=self.Path),
            )

        editor.add_auth_to_method(api=api, path=self.Path, method_name=self.Method, auth=self.Auth)
class SamApplication(SamResourceMacro):
    """SAM application macro.

    Expands an AWS::Serverless::Application resource into a plain
    AWS::CloudFormation::Stack (nested stack) resource.
    """
    APPLICATION_ID_KEY = 'ApplicationId'
    SEMANTIC_VERSION_KEY = 'SemanticVersion'

    resource_type = 'AWS::Serverless::Application'

    # The plugin will always insert the TemplateUrl parameter
    property_types = {
        'Location': PropertyType(True, one_of(is_str(), is_type(dict))),
        'TemplateUrl': PropertyType(False, is_str()),
        'Parameters': PropertyType(False, is_type(dict)),
        'NotificationARNs': PropertyType(False, list_of(one_of(is_str(), is_type(dict)))),
        'Tags': PropertyType(False, is_type(dict)),
        'TimeoutInMinutes': PropertyType(False, is_type(int))
    }

    def to_cloudformation(self, **kwargs):
        """Returns the stack with the proper parameters for this application.

        :returns: a one-element list holding the generated nested stack
        :rtype: list
        """
        return [self._construct_nested_stack()]

    def _construct_nested_stack(self):
        """Constructs a AWS::CloudFormation::Stack resource.

        :returns: the nested stack resource for this application
        """
        stack = NestedStack(self.logical_id,
                            depends_on=self.depends_on,
                            attributes=self.get_passthrough_resource_attributes())
        stack.Parameters = self.Parameters
        stack.NotificationARNs = self.NotificationARNs
        stack.Tags = self._construct_tag_list(self.Tags, self._get_application_tags())
        stack.TimeoutInMinutes = self.TimeoutInMinutes
        # TemplateUrl is normally injected by the plugin; default to "" so the
        # TemplateURL property is always set on the generated stack.
        stack.TemplateURL = self.TemplateUrl if self.TemplateUrl else ""
        return stack

    def _get_application_tags(self):
        """Adds tags to the stack if this resource is using the serverless app repo.

        :returns: dict of SAR tags (app id / semantic version) when Location is a dict
        :rtype: dict
        """
        tags = {}
        if isinstance(self.Location, dict):
            app_id = self.Location.get(self.APPLICATION_ID_KEY)
            if app_id is not None:
                tags[self._SAR_APP_KEY] = app_id
            semantic_version = self.Location.get(self.SEMANTIC_VERSION_KEY)
            if semantic_version is not None:
                tags[self._SAR_SEMVER_KEY] = semantic_version
        return tags
class S3(PushEventSource):
    """S3 bucket event source for SAM Functions."""
    resource_type = 'S3'
    principal = 's3.amazonaws.com'
    property_types = {
        'Bucket': PropertyType(True, is_str()),
        'Events': PropertyType(True, one_of(is_str(), list_of(is_str()))),
        'Filter': PropertyType(False, dict_of(is_str(), is_str()))
    }

    def resources_to_link(self, resources):
        # The bucket must be a {"Ref": ...} to a bucket declared in the same template;
        # anything else (literal name, ImportValue, unknown logical id) is rejected.
        if isinstance(self.Bucket, dict) and 'Ref' in self.Bucket:
            bucket_id = self.Bucket['Ref']
            if bucket_id in resources:
                return {
                    'bucket': resources[bucket_id],
                    'bucket_id': bucket_id
                }
        raise InvalidEventException(self.relative_id,
                                    "S3 events must reference an S3 bucket in the same template.")

    def to_cloudformation(self, **kwargs):
        """Returns the Lambda Permission resource allowing S3 to invoke the function this event source triggers.

        Also mutates the referenced bucket dict in place to add the notification configuration
        and re-emits the bucket as a resource.

        :param dict kwargs: S3 bucket resource
        :returns: a list of vanilla CloudFormation Resources, to which this S3 event expands
        :rtype: list
        """
        function = kwargs.get('function')
        if not function:
            raise TypeError("Missing required keyword argument: function")
        if 'bucket' not in kwargs or kwargs['bucket'] is None:
            raise TypeError("Missing required keyword argument: bucket")
        if 'bucket_id' not in kwargs or kwargs['bucket_id'] is None:
            raise TypeError("Missing required keyword argument: bucket_id")

        bucket = kwargs['bucket']
        bucket_id = kwargs['bucket_id']

        resources = []

        # Restrict the permission to this account; S3 bucket ARNs carry no account id,
        # so SourceAccount (not SourceArn) is the scoping mechanism here.
        source_account = ref('AWS::AccountId')
        permission = self._construct_permission(function, source_account=source_account)
        if CONDITION in permission.resource_attributes:
            # Conditional permission: express the dependency via a tag trick (see helper)
            self._depend_on_lambda_permissions_using_tag(bucket, permission)
        else:
            self._depend_on_lambda_permissions(bucket, permission)
        resources.append(permission)

        # NOTE: `bucket` here is a dictionary representing the S3 Bucket resource in your SAM template. If there are
        # multiple S3 Events attached to the same bucket, we will update the Bucket resource with notification
        # configuration for each event. This is the reason why we continue to use existing bucket dict and append onto
        # it.
        #
        # NOTE: There is some fragile logic here where we will append multiple resources to output
        # SAM template but de-dupe them when merging into output CFN template. This is scary because the order of
        # merging is literally "last one wins", which works fine because we linearly loop through the template once.
        # The de-dupe happens inside `samtranslator.translator.Translator.translate` method when merging results of
        # to_cloudformation() to output template.
        self._inject_notification_configuration(function, bucket)
        resources.append(S3Bucket.from_dict(bucket_id, bucket))

        return resources

    def _depend_on_lambda_permissions(self, bucket, permission):
        """
        Make the S3 bucket depends on Lambda Permissions resource because when S3 adds a Notification Configuration,
        it will check whether it has permissions to access Lambda. This will fail if the Lambda::Permissions is not
        already applied for this bucket to invoke the Lambda.

        :param dict bucket: Dictionary representing the bucket in SAM template. This is a raw dictionary and not a
            "resource" object
        :param model.lambda_.lambda_permission permission: Lambda Permission resource that needs to be created before
            the bucket.
        :return: Modified Bucket dictionary
        """
        depends_on = bucket.get("DependsOn", [])

        # DependsOn can be either a list of strings or a scalar string
        if isinstance(depends_on, string_types):
            depends_on = [depends_on]

        # Use a set to avoid adding the same dependency twice
        depends_on_set = set(depends_on)
        depends_on_set.add(permission.logical_id)
        bucket["DependsOn"] = list(depends_on_set)

        return bucket

    def _depend_on_lambda_permissions_using_tag(self, bucket, permission):
        """
        Since conditional DependsOn is not supported this undocumented way of implicitly making dependency through
        tags is used.

        See https://stackoverflow.com/questions/34607476/cloudformation-apply-condition-on-dependson

        It is done by using Ref wrapped in a conditional Fn::If. Using Ref implies a dependency, so CloudFormation
        will automatically wait once it reaches that function, the same as if you were using a DependsOn.
        """
        properties = bucket.get('Properties', None)
        if properties is None:
            properties = {}
            bucket['Properties'] = properties
        tags = properties.get('Tags', None)
        if tags is None:
            tags = []
            properties['Tags'] = tags
        # The tag value resolves to the permission's logical id (a Ref, which implies
        # the dependency) only when the permission's condition holds; otherwise it is inert.
        dep_tag = {
            'sam:ConditionalDependsOn:' + permission.logical_id: {
                'Fn::If': [
                    permission.resource_attributes[CONDITION],
                    ref(permission.logical_id),
                    'no dependency'
                ]
            }
        }
        properties['Tags'] = tags + get_tag_list(dep_tag)
        return bucket

    def _inject_notification_configuration(self, function, bucket):
        # Build one LambdaConfiguration entry per event type, all pointing at this function.
        base_event_mapping = {
            'Function': function.get_runtime_attr("arn")
        }

        if self.Filter is not None:
            base_event_mapping['Filter'] = self.Filter

        event_types = self.Events
        if isinstance(self.Events, string_types):
            event_types = [self.Events]

        event_mappings = []
        for event_type in event_types:

            lambda_event = copy.deepcopy(base_event_mapping)
            lambda_event['Event'] = event_type
            if CONDITION in function.resource_attributes:
                lambda_event = make_conditional(function.resource_attributes[CONDITION], lambda_event)
            event_mappings.append(lambda_event)

        # Create Properties -> NotificationConfiguration -> LambdaConfigurations
        # on demand, reusing whatever already exists on the bucket dict.
        properties = bucket.get('Properties', None)
        if properties is None:
            properties = {}
            bucket['Properties'] = properties

        notification_config = properties.get('NotificationConfiguration', None)
        if notification_config is None:
            notification_config = {}
            properties['NotificationConfiguration'] = notification_config

        lambda_notifications = notification_config.get('LambdaConfigurations', None)
        if lambda_notifications is None:
            lambda_notifications = []
            notification_config['LambdaConfigurations'] = lambda_notifications

        for event_mapping in event_mappings:
            if event_mapping not in lambda_notifications:
                lambda_notifications.append(event_mapping)
        return bucket
class SamLayerVersion(SamResourceMacro):
    """ SAM Layer macro """
    resource_type = 'AWS::Serverless::LayerVersion'
    property_types = {
        'LayerName': PropertyType(False, one_of(is_str(), is_type(dict))),
        'Description': PropertyType(False, is_str()),
        'ContentUri': PropertyType(True, one_of(is_str(), is_type(dict))),
        'CompatibleRuntimes': PropertyType(False, list_of(is_str())),
        'LicenseInfo': PropertyType(False, is_str()),
        'RetentionPolicy': PropertyType(False, is_str())
    }

    RETAIN = 'Retain'
    DELETE = 'Delete'
    retention_policy_options = [RETAIN.lower(), DELETE.lower()]

    def to_cloudformation(self, **kwargs):
        """Returns the Lambda layer to which this SAM Layer corresponds.

        :param dict kwargs: already-converted resources that may need to be modified when converting this \
        macro to pure CloudFormation
        :returns: a list of vanilla CloudFormation Resources, to which this Function expands
        :rtype: list
        """
        resources = []

        # Append any CFN resources:
        intrinsics_resolver = kwargs["intrinsics_resolver"]
        resources.append(self._construct_lambda_layer(intrinsics_resolver))

        return resources

    def _construct_lambda_layer(self, intrinsics_resolver):
        """Constructs and returns the Lambda layer.

        :param intrinsics_resolver: resolver used to substitute template parameter references
        :returns: the AWS::Lambda::LayerVersion resource
        :rtype: model.lambda_.LambdaLayerVersion
        """
        # Resolve intrinsics if applicable:
        self.LayerName = self._resolve_string_parameter(intrinsics_resolver, self.LayerName, 'LayerName')
        self.LicenseInfo = self._resolve_string_parameter(intrinsics_resolver, self.LicenseInfo, 'LicenseInfo')
        self.Description = self._resolve_string_parameter(intrinsics_resolver, self.Description, 'Description')
        self.RetentionPolicy = self._resolve_string_parameter(intrinsics_resolver, self.RetentionPolicy,
                                                              'RetentionPolicy')

        retention_policy_value = self._get_retention_policy_value()

        attributes = self.get_passthrough_resource_attributes()
        if attributes is None:
            attributes = {}
        attributes['DeletionPolicy'] = retention_policy_value

        old_logical_id = self.logical_id
        new_logical_id = logical_id_generator.LogicalIdGenerator(old_logical_id, self.to_dict()).gen()
        self.logical_id = new_logical_id

        lambda_layer = LambdaLayerVersion(self.logical_id, depends_on=self.depends_on, attributes=attributes)

        # Changing the LayerName property: when a layer is published, it is given an Arn
        # example: arn:aws:lambda:us-west-2:123456789012:layer:MyLayer:1
        # where MyLayer is the LayerName property if it exists; otherwise, it is the
        # LogicalId of this resource. Since a LayerVersion is an immutable resource, when
        # CloudFormation updates this resource, it will ALWAYS create a new version then
        # delete the old version if the logical ids match. What this does is change the
        # logical id of every layer (so a `DeletionPolicy: Retain` can work) and set the
        # LayerName property of the layer so that the Arn will still always be the same
        # with the exception of an incrementing version number.
        if not self.LayerName:
            self.LayerName = old_logical_id

        lambda_layer.LayerName = self.LayerName
        lambda_layer.Description = self.Description
        lambda_layer.Content = construct_s3_location_object(self.ContentUri, self.logical_id, 'ContentUri')
        lambda_layer.CompatibleRuntimes = self.CompatibleRuntimes
        lambda_layer.LicenseInfo = self.LicenseInfo

        return lambda_layer

    def _get_retention_policy_value(self):
        """
        Sets the deletion policy on this resource. The default is 'Retain'.

        :return: value for the DeletionPolicy attribute.
        :raises InvalidResourceException: if RetentionPolicy is not a string or is not
            one of 'Retain'/'Delete' (case-insensitive).
        """
        if self.RetentionPolicy is None:
            return self.RETAIN

        # BUGFIX: an unresolved intrinsic (dict) used to crash with AttributeError on
        # .lower(); surface it as a proper InvalidResourceException instead.
        if not isinstance(self.RetentionPolicy, string_types) \
                or self.RetentionPolicy.lower() not in self.retention_policy_options:
            raise InvalidResourceException(self.logical_id,
                                           "'{}' must be one of the following options: {}."
                                           .format('RetentionPolicy', [self.RETAIN, self.DELETE]))

        if self.RetentionPolicy.lower() == self.RETAIN.lower():
            return self.RETAIN
        # Only remaining valid option (the branches above are exhaustive, so no
        # implicit `return None` path exists as in the original).
        return self.DELETE
class Api(PushEventSource):
    """Api method event source for SAM Functions."""
    resource_type = 'Api'
    principal = 'apigateway.amazonaws.com'
    property_types = {
        'Path': PropertyType(True, is_str()),
        'Method': PropertyType(True, is_str()),

        # Api Event sources must "always" be paired with a Serverless::Api
        'RestApiId': PropertyType(True, is_str()),
        'Auth': PropertyType(False, is_type(dict)),
        'RequestModel': PropertyType(False, is_type(dict)),
        'RequestParameters': PropertyType(False, is_type(list))
    }

    def resources_to_link(self, resources):
        """
        If this API Event Source refers to an explicit API resource, resolve the reference and grab
        necessary data from the explicit API
        """

        rest_api_id = self.RestApiId
        if isinstance(rest_api_id, dict) and "Ref" in rest_api_id:
            rest_api_id = rest_api_id["Ref"]

        # If RestApiId is a resource in the same template, then we try find the StageName by following the reference
        # Otherwise we default to a wildcard. This stage name is solely used to construct the permission to
        # allow this stage to invoke the Lambda function. If we are unable to resolve the stage name, we will
        # simply permit all stages to invoke this Lambda function
        # This hack is necessary because customers could use !ImportValue, !Ref or other intrinsic functions which
        # can be sometimes impossible to resolve (ie. when it has cross-stack references)
        permitted_stage = "*"
        stage_suffix = "AllStages"
        explicit_api = None
        if isinstance(rest_api_id, string_types):

            if rest_api_id in resources \
                    and "Properties" in resources[rest_api_id] \
                    and "StageName" in resources[rest_api_id]["Properties"]:

                explicit_api = resources[rest_api_id]["Properties"]
                permitted_stage = explicit_api["StageName"]

                # Stage could be a intrinsic, in which case leave the suffix to default value
                if isinstance(permitted_stage, string_types):
                    if not permitted_stage:
                        raise InvalidResourceException(rest_api_id, 'StageName cannot be empty.')
                    stage_suffix = permitted_stage
                else:
                    stage_suffix = "Stage"

            else:
                # RestApiId is a string, not an intrinsic, but we did not find a valid API resource for this ID
                raise InvalidEventException(self.relative_id,
                                            "RestApiId property of Api event must reference a valid "
                                            "resource in the same template.")

        return {
            'explicit_api': explicit_api,
            'explicit_api_stage': {
                'permitted_stage': permitted_stage,
                'suffix': stage_suffix
            }
        }

    def to_cloudformation(self, **kwargs):
        """If the Api event source has a RestApi property, then simply return the Lambda Permission resource allowing
        API Gateway to call the function. If no RestApi is provided, then additionally inject the path, method, and the
        x-amazon-apigateway-integration into the Swagger body for a provided implicit API.

        :param dict kwargs: a dict containing the implicit RestApi to be modified, should no explicit RestApi \
                be provided.
        :returns: a list of vanilla CloudFormation Resources, to which this Api event expands
        :rtype: list
        """
        resources = []

        function = kwargs.get('function')

        if not function:
            raise TypeError("Missing required keyword argument: function")

        if self.Method is not None:
            # Convert to lower case so that user can specify either GET or get
            self.Method = self.Method.lower()

        resources.extend(self._get_permissions(kwargs))

        explicit_api = kwargs['explicit_api']
        # __MANAGE_SWAGGER is set only for implicit APIs (see SamApi.property_types)
        if explicit_api.get("__MANAGE_SWAGGER"):
            self._add_swagger_integration(explicit_api, function)

        return resources

    def _get_permissions(self, resources_to_link):
        """Builds the Lambda Permissions for this event: one for the special "Test"
        stage (API Gateway console test invocations) and one for the deployed stage.
        """
        permissions = []

        permissions.append(self._get_permission(resources_to_link, "*", "Test"))

        # By default, implicit APIs get a stage called Prod. If the API event refers to an
        # explicit API using RestApiId property, we should grab the stage name of the explicit API
        permitted_stage = suffix = "Prod"
        if 'explicit_api_stage' in resources_to_link:
            permitted_stage = resources_to_link['explicit_api_stage']['permitted_stage']
            suffix = resources_to_link['explicit_api_stage']['suffix']

        permissions.append(self._get_permission(resources_to_link, permitted_stage, suffix))
        return permissions

    def _get_permission(self, resources_to_link, stage, suffix):
        """Builds one Lambda Permission whose SourceArn is scoped to this API id,
        the given stage, and this event's method+path.
        """
        # It turns out that APIGW doesn't like trailing slashes in paths (#665)
        # and removes as a part of their behaviour, but this isn't documented.
        # The regex removes the tailing slash to ensure the permission works as intended
        path = re.sub(r'^(.+)/$', r'\1', self.Path)

        if not stage or not suffix:
            raise RuntimeError("Could not add permission to lambda function.")

        # Replace path parameters (e.g. {id}, {proxy+}) with wildcards for the ARN
        path = re.sub(r'{([a-zA-Z0-9._-]+|proxy\+)}', '*', path)
        method = '*' if self.Method.lower() == 'any' else self.Method.upper()

        api_id = self.RestApiId

        # RestApiId can be a simple string or intrinsic function like !Ref. Using Fn::Sub will handle both cases
        resource = '${__ApiId__}/' + '${__Stage__}/' + method + path
        partition = ArnGenerator.get_partition_name()
        source_arn = fnSub(ArnGenerator.generate_arn(partition=partition, service='execute-api', resource=resource),
                           {"__ApiId__": api_id, "__Stage__": stage})

        return self._construct_permission(resources_to_link['function'], source_arn=source_arn, suffix=suffix)

    def _add_swagger_integration(self, api, function):
        """Adds the path and method for this Api event source to the Swagger body for the provided RestApi.

        Also validates and applies the event's Auth, RequestModel and RequestParameters
        settings against the API-level configuration, mutating ``api["DefinitionBody"]``.

        :param model.apigateway.ApiGatewayRestApi rest_api: the RestApi to which the path and method should be added.
        """
        swagger_body = api.get("DefinitionBody")
        if swagger_body is None:
            return

        function_arn = function.get_runtime_attr('arn')
        partition = ArnGenerator.get_partition_name()
        uri = fnSub('arn:' + partition + ':apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/' +
                    make_shorthand(function_arn) + '/invocations')

        editor = SwaggerEditor(swagger_body)

        if editor.has_integration(self.Path, self.Method):
            # Cannot add the Lambda Integration, if it is already present
            raise InvalidEventException(
                self.relative_id,
                'API method "{method}" defined multiple times for path "{path}".'.format(
                    method=self.Method, path=self.Path))

        condition = None
        if CONDITION in function.resource_attributes:
            condition = function.resource_attributes[CONDITION]

        editor.add_lambda_integration(self.Path, self.Method, uri, self.Auth, api.get('Auth'), condition=condition)

        if self.Auth:
            method_authorizer = self.Auth.get('Authorizer')
            api_auth = api.get('Auth')

            if method_authorizer:
                api_authorizers = api_auth and api_auth.get('Authorizers')

                # AWS_IAM is built-in and needs no Authorizers entry on the API
                if method_authorizer != 'AWS_IAM':
                    if method_authorizer != 'NONE' and not api_authorizers:
                        raise InvalidEventException(
                            self.relative_id,
                            'Unable to set Authorizer [{authorizer}] on API method [{method}] for path [{path}] '
                            'because the related API does not define any Authorizers.'.format(
                                authorizer=method_authorizer, method=self.Method, path=self.Path))

                    if method_authorizer != 'NONE' and not api_authorizers.get(method_authorizer):
                        raise InvalidEventException(
                            self.relative_id,
                            'Unable to set Authorizer [{authorizer}] on API method [{method}] for path [{path}] '
                            'because it wasn\'t defined in the API\'s Authorizers.'.format(
                                authorizer=method_authorizer, method=self.Method, path=self.Path))

                    if method_authorizer == 'NONE' and not api_auth.get('DefaultAuthorizer'):
                        raise InvalidEventException(
                            self.relative_id,
                            'Unable to set Authorizer on API method [{method}] for path [{path}] because \'NONE\' '
                            'is only a valid value when a DefaultAuthorizer on the API is specified.'.format(
                                method=self.Method, path=self.Path))

            apikey_required_setting = self.Auth.get('ApiKeyRequired')
            # Explicitly-set False only makes sense as an override of an API-level ApiKeyRequired
            apikey_required_setting_is_false = apikey_required_setting is not None and not apikey_required_setting
            if apikey_required_setting_is_false and not api_auth.get('ApiKeyRequired'):
                raise InvalidEventException(
                    self.relative_id,
                    'Unable to set ApiKeyRequired [False] on API method [{method}] for path [{path}] '
                    'because the related API does not specify any ApiKeyRequired.'.format(
                        method=self.Method, path=self.Path))

            if method_authorizer or apikey_required_setting is not None:
                editor.add_auth_to_method(api=api, path=self.Path, method_name=self.Method, auth=self.Auth)

        if self.RequestModel:
            method_model = self.RequestModel.get('Model')

            if method_model:
                api_models = api.get('Models')
                if not api_models:
                    raise InvalidEventException(
                        self.relative_id,
                        'Unable to set RequestModel [{model}] on API method [{method}] for path [{path}] '
                        'because the related API does not define any Models.'.format(
                            model=method_model, method=self.Method, path=self.Path))

                if not api_models.get(method_model):
                    raise InvalidEventException(
                        self.relative_id,
                        'Unable to set RequestModel [{model}] on API method [{method}] for path [{path}] '
                        'because it wasn\'t defined in the API\'s Models.'.format(
                            model=method_model, method=self.Method, path=self.Path))

                editor.add_request_model_to_method(path=self.Path, method_name=self.Method,
                                                   request_model=self.RequestModel)

        if self.RequestParameters:

            default_value = {
                'Required': False,
                'Caching': False
            }

            parameters = []
            for parameter in self.RequestParameters:

                if isinstance(parameter, dict):
                    # Dict form: {"method.request.<loc>.<name>": {"Required": ..., "Caching": ...}}
                    parameter_name, parameter_value = next(iter(parameter.items()))

                    if not re.match('method\.request\.(querystring|path|header)\.', parameter_name):
                        raise InvalidEventException(
                            self.relative_id,
                            "Invalid value for 'RequestParameters' property. Keys must be in the format "
                            "'method.request.[querystring|path|header].{value}', "
                            "e.g 'method.request.header.Authorization'.")

                    if not isinstance(parameter_value, dict) or not all(
                            key in REQUEST_PARAMETER_PROPERTIES for key in parameter_value.keys()):
                        raise InvalidEventException(
                            self.relative_id,
                            "Invalid value for 'RequestParameters' property. Values must be an object, "
                            "e.g { Required: true, Caching: false }")

                    settings = default_value.copy()
                    settings.update(parameter_value)
                    settings.update({'Name': parameter_name})
                    parameters.append(settings)

                elif isinstance(parameter, string_types):
                    # String form: "method.request.<loc>.<name>" with default settings
                    if not re.match('method\.request\.(querystring|path|header)\.', parameter):
                        raise InvalidEventException(
                            self.relative_id,
                            "Invalid value for 'RequestParameters' property. Keys must be in the format "
                            "'method.request.[querystring|path|header].{value}', "
                            "e.g 'method.request.header.Authorization'.")

                    settings = default_value.copy()
                    settings.update({'Name': parameter})
                    parameters.append(settings)

                else:
                    raise InvalidEventException(
                        self.relative_id,
                        "Invalid value for 'RequestParameters' property. Property must be either a string or an object")

            editor.add_request_parameters_to_method(path=self.Path, method_name=self.Method,
                                                    request_parameters=parameters)

        api["DefinitionBody"] = editor.swagger
class SamApi(SamResourceMacro):
    """SAM rest API macro.
    """
    resource_type = 'AWS::Serverless::Api'
    property_types = {
        # Internal property set only by Implicit API plugin. If set to True, the API Event Source code will inject
        # Lambda Integration URI to the Swagger. To preserve backwards compatibility, this must be set only for
        # Implicit APIs. For Explicit APIs, customer is expected to set integration URI themselves.
        # In the future, we might rename and expose this property to customers so they can have SAM manage Explicit APIs
        # Swagger.
        '__MANAGE_SWAGGER': PropertyType(False, is_type(bool)),

        'Name': PropertyType(False, one_of(is_str(), is_type(dict))),
        'StageName': PropertyType(True, one_of(is_str(), is_type(dict))),
        'DefinitionBody': PropertyType(False, is_type(dict)),
        'DefinitionUri': PropertyType(False, one_of(is_str(), is_type(dict))),
        'CacheClusterEnabled': PropertyType(False, is_type(bool)),
        'CacheClusterSize': PropertyType(False, is_str()),
        'Variables': PropertyType(False, is_type(dict)),
        'EndpointConfiguration': PropertyType(False, is_str()),
        'MethodSettings': PropertyType(False, is_type(list)),
        'BinaryMediaTypes': PropertyType(False, is_type(list)),
        'Cors': PropertyType(False, one_of(is_str(), is_type(dict)))
    }

    # Attributes of generated resources that other template resources may reference
    referable_properties = {
        "Stage": ApiGatewayStage.resource_type,
        "Deployment": ApiGatewayDeployment.resource_type,
    }

    def to_cloudformation(self, **kwargs):
        """Returns the API Gateway RestApi, Deployment, and Stage to which this SAM Api corresponds.

        :param dict kwargs: already-converted resources that may need to be modified when converting this \
        macro to pure CloudFormation
        :returns: a list of vanilla CloudFormation Resources, to which this Function expands
        :rtype: list
        """
        resources = []

        # ApiGenerator does the heavy lifting of building the three resources;
        # the first nine arguments are positional — do not reorder.
        api_generator = ApiGenerator(self.logical_id,
                                     self.CacheClusterEnabled,
                                     self.CacheClusterSize,
                                     self.Variables,
                                     self.depends_on,
                                     self.DefinitionBody,
                                     self.DefinitionUri,
                                     self.Name,
                                     self.StageName,
                                     endpoint_configuration=self.EndpointConfiguration,
                                     method_settings=self.MethodSettings,
                                     binary_media=self.BinaryMediaTypes,
                                     cors=self.Cors)

        rest_api, deployment, stage = api_generator.to_cloudformation()

        resources.extend([rest_api, deployment, stage])

        return resources
class Cognito(PushEventSource):
    """Cognito UserPool event source for SAM Functions.

    Wires Lambda triggers into a Cognito UserPool declared in the same template:
    creates the Lambda Permission and injects the function ARN into the pool's
    LambdaConfig for each requested trigger.
    """
    resource_type = 'Cognito'
    principal = 'cognito-idp.amazonaws.com'
    property_types = {
        'UserPool': PropertyType(True, is_str()),
        'Trigger': PropertyType(True, one_of(is_str(), list_of(is_str())))
    }

    def resources_to_link(self, resources):
        """Resolves the UserPool {"Ref": ...} to the pool resource declared in this template.

        :raises InvalidEventException: if UserPool is not a Ref to a resource in the template
        """
        if isinstance(self.UserPool, dict) and 'Ref' in self.UserPool:
            userpool_id = self.UserPool['Ref']
            if userpool_id in resources:
                return {
                    'userpool': resources[userpool_id],
                    'userpool_id': userpool_id
                }
        raise InvalidEventException(
            self.relative_id,
            "Cognito events must reference a Cognito UserPool in the same template.")

    def to_cloudformation(self, **kwargs):
        """Returns the Lambda Permission and the (modified) UserPool resource for this event.

        :param dict kwargs: must contain 'function', 'userpool' and 'userpool_id'
        :returns: a list of vanilla CloudFormation Resources, to which this Cognito event expands
        :rtype: list
        """
        function = kwargs.get('function')

        if not function:
            raise TypeError("Missing required keyword argument: function")

        if 'userpool' not in kwargs or kwargs['userpool'] is None:
            raise TypeError("Missing required keyword argument: userpool")

        if 'userpool_id' not in kwargs or kwargs['userpool_id'] is None:
            raise TypeError("Missing required keyword argument: userpool_id")

        userpool = kwargs['userpool']
        userpool_id = kwargs['userpool_id']

        resources = []

        # BUGFIX: scope the Lambda Permission to the user pool's ARN via SourceArn.
        # The previous code passed the UserPool reference as `event_source_token`,
        # but EventSourceToken is the Alexa-Skill scoping mechanism and does not
        # restrict which pool may invoke the function. This matches how the other
        # push event sources (S3, Api) scope their permissions.
        source_arn = fnGetAtt(userpool_id, 'Arn')
        resources.append(
            self._construct_permission(function,
                                       source_arn=source_arn,
                                       prefix=function.logical_id + "Cognito"))

        self._inject_lambda_config(function, userpool)
        resources.append(CognitoUserPool.from_dict(userpool_id, userpool))
        return resources

    def _inject_lambda_config(self, function, userpool):
        """Adds this function's ARN to the pool's LambdaConfig for each trigger.

        :param function: the Lambda function resource this event triggers
        :param dict userpool: raw UserPool resource dictionary, mutated in place
        :raises InvalidEventException: if a trigger is already configured on the pool
        """
        event_triggers = self.Trigger
        if isinstance(self.Trigger, string_types):
            event_triggers = [self.Trigger]

        # TODO can these be conditional?

        # Create Properties -> LambdaConfig on demand, reusing existing dicts.
        properties = userpool.get('Properties', None)
        if properties is None:
            properties = {}
            userpool['Properties'] = properties

        lambda_config = properties.get('LambdaConfig', None)
        if lambda_config is None:
            lambda_config = {}
            properties['LambdaConfig'] = lambda_config

        for event_trigger in event_triggers:
            if event_trigger not in lambda_config:
                lambda_config[event_trigger] = function.get_runtime_attr("arn")
            else:
                raise InvalidEventException(
                    self.relative_id,
                    'Cognito trigger "{trigger}" defined multiple times.'.format(
                        trigger=self.Trigger))
        return userpool
class PullEventSource(ResourceMacro):
    """Base class for pull event sources for SAM Functions.

    The pull events are Kinesis Streams, DynamoDB Streams, and SQS Queues. All of these correspond to an
    EventSourceMapping in Lambda, and require that the execution role be given to Kinesis Streams, DynamoDB
    Streams, or SQS Queues, respectively.

    :cvar str policy_arn: The ARN of the AWS managed role policy corresponding to this pull event source
    """
    resource_type = None
    property_types = {
        "Stream": PropertyType(False, is_str()),
        "Queue": PropertyType(False, is_str()),
        "BatchSize": PropertyType(False, is_type(int)),
        "StartingPosition": PropertyType(False, is_str()),
        "Enabled": PropertyType(False, is_type(bool)),
        "MaximumBatchingWindowInSeconds": PropertyType(False, is_type(int)),
        "MaximumRetryAttempts": PropertyType(False, is_type(int)),
        "BisectBatchOnFunctionError": PropertyType(False, is_type(bool)),
        "MaximumRecordAgeInSeconds": PropertyType(False, is_type(int)),
        "DestinationConfig": PropertyType(False, is_type(dict)),
        "ParallelizationFactor": PropertyType(False, is_type(int)),
    }

    def get_policy_arn(self):
        # Subclasses (Kinesis, DynamoDB, SQS) return the managed policy ARN
        # to attach to the function's execution role.
        raise NotImplementedError("Subclass must implement this method")

    def to_cloudformation(self, **kwargs):
        """Returns the Lambda EventSourceMapping to which this pull event corresponds. Adds the appropriate managed
        policy to the function's execution role, if such a role is provided.

        :param dict kwargs: a dict containing the execution role generated for the function
        :returns: a list of vanilla CloudFormation Resources, to which this pull event expands
        :rtype: list
        :raises InvalidEventException: on missing Stream/Queue, missing StartingPosition for
            streams, or an invalid DestinationConfig
        """
        function = kwargs.get("function")

        if not function:
            raise TypeError("Missing required keyword argument: function")

        resources = []

        lambda_eventsourcemapping = LambdaEventSourceMapping(self.logical_id)
        resources.append(lambda_eventsourcemapping)

        try:
            # Name will not be available for Alias resources
            function_name_or_arn = function.get_runtime_attr("name")
        except NotImplementedError:
            function_name_or_arn = function.get_runtime_attr("arn")

        if not self.Stream and not self.Queue:
            raise InvalidEventException(
                self.relative_id,
                "No Queue (for SQS) or Stream (for Kinesis or DynamoDB) provided."
            )

        if self.Stream and not self.StartingPosition:
            raise InvalidEventException(
                self.relative_id,
                "StartingPosition is required for Kinesis and DynamoDB.")

        lambda_eventsourcemapping.FunctionName = function_name_or_arn
        lambda_eventsourcemapping.EventSourceArn = self.Stream or self.Queue
        lambda_eventsourcemapping.StartingPosition = self.StartingPosition
        lambda_eventsourcemapping.BatchSize = self.BatchSize
        lambda_eventsourcemapping.Enabled = self.Enabled
        lambda_eventsourcemapping.MaximumBatchingWindowInSeconds = self.MaximumBatchingWindowInSeconds
        lambda_eventsourcemapping.MaximumRetryAttempts = self.MaximumRetryAttempts
        lambda_eventsourcemapping.BisectBatchOnFunctionError = self.BisectBatchOnFunctionError
        lambda_eventsourcemapping.MaximumRecordAgeInSeconds = self.MaximumRecordAgeInSeconds
        lambda_eventsourcemapping.ParallelizationFactor = self.ParallelizationFactor

        destination_config_policy = None
        if self.DestinationConfig:
            # BUGFIX: validate that 'OnFailure' exists BEFORE dereferencing it. The
            # original code called .get("OnFailure").get("Type") first, so a missing
            # OnFailure raised AttributeError instead of the intended InvalidEventException.
            if self.DestinationConfig.get("OnFailure") is None:
                raise InvalidEventException(
                    self.logical_id,
                    "'OnFailure' is a required field for 'DestinationConfig'")

            # `Type` property is for sam to attach the right policies
            destination_type = self.DestinationConfig.get("OnFailure").get("Type")

            # SAM attaches the policies for SQS or SNS only if 'Type' is given
            if destination_type:
                # the values 'SQS' and 'SNS' are allowed. No intrinsics are allowed
                if destination_type not in ["SQS", "SNS"]:
                    raise InvalidEventException(
                        self.logical_id,
                        "The only valid values for 'Type' are 'SQS' and 'SNS'")

                if destination_type == "SQS":
                    queue_arn = self.DestinationConfig.get("OnFailure").get("Destination")
                    destination_config_policy = IAMRolePolicies().sqs_send_message_role_policy(
                        queue_arn, self.logical_id)
                elif destination_type == "SNS":
                    sns_topic_arn = self.DestinationConfig.get("OnFailure").get("Destination")
                    destination_config_policy = IAMRolePolicies().sns_publish_role_policy(
                        sns_topic_arn, self.logical_id)

            lambda_eventsourcemapping.DestinationConfig = self.DestinationConfig

        if "Condition" in function.resource_attributes:
            lambda_eventsourcemapping.set_resource_attribute(
                "Condition", function.resource_attributes["Condition"])

        if "role" in kwargs:
            self._link_policy(kwargs["role"], destination_config_policy)

        return resources

    def _link_policy(self, role, destination_config_policy=None):
        """If this source triggers a Lambda function whose execution role is auto-generated by SAM, add the
        appropriate managed policy to this Role.

        :param model.iam.IAMRole role: the execution role generated for the function; may be None
            when the function uses a customer-provided role
        :param dict destination_config_policy: optional SQS/SNS policy for the OnFailure destination
        """
        # BUGFIX: guard on `role` up front — the original executed a second policy check
        # unconditionally, which dereferenced role.Policies even when role was None.
        if role is None:
            return

        policy_arn = self.get_policy_arn()
        if policy_arn not in role.ManagedPolicyArns:
            role.ManagedPolicyArns.append(policy_arn)

        # add SQS or SNS policy only if an OnFailure destination was configured
        if destination_config_policy:
            if role.Policies is None:
                role.Policies = []
            # BUGFIX: do not append the policy when the same policy document is already
            # present — the original could append it twice (once unconditionally, once
            # after the membership check).
            existing_documents = [policy["PolicyDocument"] for policy in role.Policies]
            if destination_config_policy.get("PolicyDocument") not in existing_documents:
                role.Policies.append(destination_config_policy)
class LambdaFunction(Resource):
    """Represents a raw AWS::Lambda::Function CloudFormation resource.

    Declares the properties CloudFormation accepts for a Lambda function; the first
    argument to each PropertyType marks whether the property is required.
    """
    resource_type = 'AWS::Lambda::Function'
    property_types = {
        'Code': PropertyType(True, is_type(dict)),
        'DeadLetterConfig': PropertyType(False, is_type(dict)),
        'Description': PropertyType(False, is_str()),
        'FunctionName': PropertyType(False, is_str()),
        'Handler': PropertyType(True, is_str()),
        'MemorySize': PropertyType(False, is_type(int)),
        'Role': PropertyType(False, is_str()),
        'Runtime': PropertyType(False, is_str()),
        'Timeout': PropertyType(False, is_type(int)),
        'VpcConfig': PropertyType(False, is_type(dict)),
        'Environment': PropertyType(False, is_type(dict)),
        'Tags': PropertyType(False, list_of(is_type(dict))),
        'TracingConfig': PropertyType(False, is_type(dict)),
        'KmsKeyArn': PropertyType(False, one_of(is_type(dict), is_str())),
        'Layers': PropertyType(False, list_of(one_of(is_str(), is_type(dict)))),
        'ReservedConcurrentExecutions': PropertyType(False, any_type())
    }

    # Runtime attributes other resources can reference:
    # "name" resolves to Ref (the function name), "arn" to Fn::GetAtt <id>.Arn.
    runtime_attrs = {
        "name": lambda self: ref(self.logical_id),
        "arn": lambda self: fnGetAtt(self.logical_id, "Arn")
    }
class PullEventSource(ResourceMacro):
    """Base class for pull event sources for SAM Functions.

    The pull events are Kinesis Streams, DynamoDB Streams, and SQS Queues. All of these correspond to an
    EventSourceMapping in Lambda, and require that the execution role be given to Kinesis Streams, DynamoDB
    Streams, or SQS Queues, respectively.

    :cvar str policy_arn: The ARN of the AWS managed role policy corresponding to this pull event source
    """
    resource_type = None
    property_types = {
        'Stream': PropertyType(False, is_str()),
        'Queue': PropertyType(False, is_str()),
        'BatchSize': PropertyType(False, is_type(int)),
        'StartingPosition': PropertyType(False, is_str()),
        'Enabled': PropertyType(False, is_type(bool)),
        'MaximumBatchingWindowInSeconds': PropertyType(False, is_type(int)),
        'MaximumRetryAttempts': PropertyType(False, is_type(int)),
        'BisectBatchOnFunctionError': PropertyType(False, is_type(bool)),
        'MaximumRecordAgeInSeconds': PropertyType(False, is_type(int)),
        'DestinationConfig': PropertyType(False, is_type(dict)),
        'ParallelizationFactor': PropertyType(False, is_type(int))
    }

    def get_policy_arn(self):
        # Each concrete pull source (Kinesis/DynamoDB/SQS) supplies its own managed policy ARN.
        raise NotImplementedError("Subclass must implement this method")

    def to_cloudformation(self, **kwargs):
        """Returns the Lambda EventSourceMapping to which this pull event corresponds. Adds the appropriate managed
        policy to the function's execution role, if such a role is provided.

        :param dict kwargs: a dict containing the execution role generated for the function
        :returns: a list of vanilla CloudFormation Resources, to which this pull event expands
        :rtype: list
        :raises TypeError: if the 'function' keyword argument is missing
        :raises InvalidEventException: if neither Stream nor Queue is given, or if Stream lacks StartingPosition
        """
        function = kwargs.get('function')

        if not function:
            raise TypeError("Missing required keyword argument: function")

        resources = []

        lambda_eventsourcemapping = LambdaEventSourceMapping(self.logical_id)
        resources.append(lambda_eventsourcemapping)

        try:
            # Name will not be available for Alias resources
            function_name_or_arn = function.get_runtime_attr("name")
        except NotImplementedError:
            function_name_or_arn = function.get_runtime_attr("arn")

        if not self.Stream and not self.Queue:
            raise InvalidEventException(
                self.relative_id,
                "No Queue (for SQS) or Stream (for Kinesis or DynamoDB) provided."
            )

        if self.Stream and not self.StartingPosition:
            raise InvalidEventException(
                self.relative_id,
                "StartingPosition is required for Kinesis and DynamoDB.")

        # Copy each SAM event property onto the generated EventSourceMapping one-to-one.
        lambda_eventsourcemapping.FunctionName = function_name_or_arn
        lambda_eventsourcemapping.EventSourceArn = self.Stream or self.Queue
        lambda_eventsourcemapping.StartingPosition = self.StartingPosition
        lambda_eventsourcemapping.BatchSize = self.BatchSize
        lambda_eventsourcemapping.Enabled = self.Enabled
        lambda_eventsourcemapping.MaximumBatchingWindowInSeconds = self.MaximumBatchingWindowInSeconds
        lambda_eventsourcemapping.MaximumRetryAttempts = self.MaximumRetryAttempts
        lambda_eventsourcemapping.BisectBatchOnFunctionError = self.BisectBatchOnFunctionError
        lambda_eventsourcemapping.MaximumRecordAgeInSeconds = self.MaximumRecordAgeInSeconds
        lambda_eventsourcemapping.DestinationConfig = self.DestinationConfig
        lambda_eventsourcemapping.ParallelizationFactor = self.ParallelizationFactor

        # Propagate the function's Condition so the mapping is created only when the function is.
        if 'Condition' in function.resource_attributes:
            lambda_eventsourcemapping.set_resource_attribute(
                'Condition', function.resource_attributes['Condition'])

        if 'role' in kwargs:
            self._link_policy(kwargs['role'])

        return resources

    def _link_policy(self, role):
        """If this source triggers a Lambda function whose execution role is auto-generated by SAM, add the
        appropriate managed policy to this Role.

        :param model.iam.IAMRole role: the execution role generated for the function; may be None when the
            customer supplied their own role, in which case nothing is added
        """
        policy_arn = self.get_policy_arn()
        # De-dupe: only append when the policy is not already attached.
        if role is not None and policy_arn not in role.ManagedPolicyArns:
            role.ManagedPolicyArns.append(policy_arn)
class ApiGatewayStage(Resource):
    """Resource model for CloudFormation's AWS::ApiGateway::Stage."""
    resource_type = "AWS::ApiGateway::Stage"
    property_types = {
        "AccessLogSetting": PropertyType(False, is_type(dict)),
        "CacheClusterEnabled": PropertyType(False, is_type(bool)),
        "CacheClusterSize": PropertyType(False, is_str()),
        "CanarySetting": PropertyType(False, is_type(dict)),
        "ClientCertificateId": PropertyType(False, is_str()),
        "DeploymentId": PropertyType(True, is_str()),
        "Description": PropertyType(False, is_str()),
        "RestApiId": PropertyType(True, is_str()),
        "StageName": PropertyType(True, one_of(is_str(), is_type(dict))),
        "Tags": PropertyType(False, list_of(is_type(dict))),
        "TracingEnabled": PropertyType(False, is_type(bool)),
        "Variables": PropertyType(False, is_type(dict)),
        "MethodSettings": PropertyType(False, is_type(list)),
    }

    # "stage_name" resolves to {"Ref": <logical id>}.
    runtime_attrs = {"stage_name": lambda self: ref(self.logical_id)}

    def update_deployment_ref(self, deployment_logical_id):
        """Re-point this stage's DeploymentId at the deployment with the given logical id.

        :param str deployment_logical_id: logical id of the AWS::ApiGateway::Deployment to reference
        """
        self.DeploymentId = ref(deployment_logical_id)
class Schedule(EventSource):
    """Scheduled executions for SAM State Machine."""
    resource_type = "Schedule"
    principal = "events.amazonaws.com"
    property_types = {
        "Schedule": PropertyType(True, is_str()),
        "Input": PropertyType(False, is_str()),
        "Enabled": PropertyType(False, is_type(bool)),
        "Name": PropertyType(False, is_str()),
        "Description": PropertyType(False, is_str()),
        "DeadLetterConfig": PropertyType(False, is_type(dict)),
        "RetryPolicy": PropertyType(False, is_type(dict)),
    }

    def to_cloudformation(self, resource, **kwargs):
        """Returns the EventBridge Rule and IAM Role to which this Schedule event source corresponds.

        :param resource: the resource this event is attached to (per the class docstring, a SAM State Machine)
        :param dict kwargs: no existing resources need to be modified
        :returns: a list of vanilla CloudFormation Resources, to which this Schedule event expands
        :rtype: list
        """
        resources = []

        permissions_boundary = kwargs.get("permissions_boundary")

        events_rule = EventsRule(self.logical_id)
        resources.append(events_rule)

        events_rule.ScheduleExpression = self.Schedule
        if self.Enabled is not None:
            events_rule.State = "ENABLED" if self.Enabled else "DISABLED"
        events_rule.Name = self.Name
        events_rule.Description = self.Description

        # Propagate the target resource's Condition to the rule.
        if CONDITION in resource.resource_attributes:
            events_rule.set_resource_attribute(
                CONDITION, resource.resource_attributes[CONDITION])

        # Role that EventBridge assumes to invoke the target.
        role = self._construct_role(resource, permissions_boundary)
        resources.append(role)

        source_arn = events_rule.get_runtime_attr("arn")
        dlq_queue_arn = None
        if self.DeadLetterConfig is not None:
            # Validate the DLQ config, then resolve (or create) the queue and any
            # supporting resources it needs.
            EventBridgeRuleUtils.validate_dlq_config(self.logical_id, self.DeadLetterConfig)
            dlq_queue_arn, dlq_resources = EventBridgeRuleUtils.get_dlq_queue_arn_and_resources(
                self, source_arn)
            resources.extend(dlq_resources)

        events_rule.Targets = [
            self._construct_target(resource, role, dlq_queue_arn)
        ]

        return resources

    def _construct_target(self, resource, role, dead_letter_queue_arn=None):
        """Constructs the Target property for the EventBridge Rule.

        :param resource: the resource to be invoked by the rule
        :param role: IAM role EventBridge assumes when invoking the target
        :param dead_letter_queue_arn: ARN of the DLQ, only set when DeadLetterConfig is configured
        :returns: the Target property
        :rtype: dict
        """
        target = {
            "Arn": resource.get_runtime_attr("arn"),
            "Id": self.logical_id + "StepFunctionsTarget",
            "RoleArn": role.get_runtime_attr("arn"),
        }
        if self.Input is not None:
            target["Input"] = self.Input

        if self.DeadLetterConfig is not None:
            target["DeadLetterConfig"] = {"Arn": dead_letter_queue_arn}

        if self.RetryPolicy is not None:
            target["RetryPolicy"] = self.RetryPolicy

        return target
class ApiGatewayAccount(Resource):
    """Resource model for CloudFormation's AWS::ApiGateway::Account."""
    resource_type = "AWS::ApiGateway::Account"
    property_types = {
        # Accepts a plain ARN string or an intrinsic-function dict.
        "CloudWatchRoleArn": PropertyType(False, one_of(is_str(), is_type(dict)))
    }
class LambdaEventSourceMapping(Resource):
    """Resource model for CloudFormation's AWS::Lambda::EventSourceMapping."""
    resource_type = "AWS::Lambda::EventSourceMapping"
    property_types = {
        "BatchSize": PropertyType(False, is_type(int)),
        "Enabled": PropertyType(False, is_type(bool)),
        "EventSourceArn": PropertyType(True, is_str()),
        "FunctionName": PropertyType(True, is_str()),
        "MaximumBatchingWindowInSeconds": PropertyType(False, is_type(int)),
        "MaximumRetryAttempts": PropertyType(False, is_type(int)),
        "BisectBatchOnFunctionError": PropertyType(False, is_type(bool)),
        "MaximumRecordAgeInSeconds": PropertyType(False, is_type(int)),
        "DestinationConfig": PropertyType(False, is_type(dict)),
        "ParallelizationFactor": PropertyType(False, is_type(int)),
        "StartingPosition": PropertyType(False, is_str()),
        "Topics": PropertyType(False, is_type(list)),
    }

    # "name" resolves to {"Ref": <logical id>}.
    runtime_attrs = {"name": lambda self: ref(self.logical_id)}
class SamSimpleTable(SamResourceMacro):
    """SAM simple table macro.

    Expands AWS::Serverless::SimpleTable into a single-key AWS::DynamoDB::Table.
    When no PrimaryKey is given, a string attribute named 'id' is used.
    """
    resource_type = 'AWS::Serverless::SimpleTable'
    property_types = {
        'PrimaryKey': PropertyType(False, dict_of(is_str(), is_str())),
        'ProvisionedThroughput': PropertyType(False, dict_of(is_str(), one_of(is_type(int), is_type(dict)))),
        'TableName': PropertyType(False, one_of(is_str(), is_type(dict))),
        'Tags': PropertyType(False, is_type(dict)),
        'SSESpecification': PropertyType(False, is_type(dict))
    }
    # Friendly type name -> DynamoDB attribute type code
    attribute_type_conversions = {'String': 'S', 'Number': 'N', 'Binary': 'B'}

    def to_cloudformation(self, **kwargs):
        """Returns the DynamoDB table to which this SAM SimpleTable corresponds.

        :returns: a list containing the generated AWS::DynamoDB::Table resource
        :rtype: list
        """
        return [self._construct_dynamodb_table()]

    def _construct_dynamodb_table(self):
        """Builds the DynamoDB table resource from this macro's properties.

        :returns: the generated DynamoDBTable resource
        :raises InvalidResourceException: if PrimaryKey is missing Name/Type or has an invalid Type
        """
        dynamodb_table = DynamoDBTable(self.logical_id,
                                       depends_on=self.depends_on,
                                       attributes=self.resource_attributes)

        if self.PrimaryKey:
            if 'Name' not in self.PrimaryKey or 'Type' not in self.PrimaryKey:
                raise InvalidResourceException(
                    self.logical_id,
                    '\'PrimaryKey\' is missing required Property \'Name\' or \'Type\'.')
            primary_key = {
                'AttributeName': self.PrimaryKey['Name'],
                'AttributeType': self._convert_attribute_type(self.PrimaryKey['Type'])
            }
        else:
            # Default primary key when the customer does not specify one.
            primary_key = {'AttributeName': 'id', 'AttributeType': 'S'}

        dynamodb_table.AttributeDefinitions = [primary_key]
        dynamodb_table.KeySchema = [{
            'AttributeName': primary_key['AttributeName'],
            'KeyType': 'HASH'
        }]

        if self.ProvisionedThroughput:
            dynamodb_table.ProvisionedThroughput = self.ProvisionedThroughput
        else:
            # Without explicit throughput, bill on demand.
            dynamodb_table.BillingMode = 'PAY_PER_REQUEST'

        if self.SSESpecification:
            dynamodb_table.SSESpecification = self.SSESpecification

        if self.TableName:
            dynamodb_table.TableName = self.TableName

        # FIX: dropped redundant bool() wrapper; truthiness test is identical.
        if self.Tags:
            dynamodb_table.Tags = get_tag_list(self.Tags)

        return dynamodb_table

    def _convert_attribute_type(self, attribute_type):
        """Maps 'String'/'Number'/'Binary' to DynamoDB attribute codes 'S'/'N'/'B'.

        :param str attribute_type: friendly type name from the PrimaryKey property
        :raises InvalidResourceException: for any other type name
        """
        if attribute_type in self.attribute_type_conversions:
            return self.attribute_type_conversions[attribute_type]
        raise InvalidResourceException(
            self.logical_id,
            'Invalid \'Type\' "{actual}".'.format(actual=attribute_type))
class SamFunction(SamResourceMacro):
    """SAM function macro.

    Expands AWS::Serverless::Function into a Lambda function, an optional
    auto-generated execution role, optional version/alias (AutoPublishAlias),
    and the resources for each configured event source.
    """
    resource_type = 'AWS::Serverless::Function'
    property_types = {
        'FunctionName': PropertyType(False, one_of(is_str(), is_type(dict))),
        'Handler': PropertyType(True, is_str()),
        'Runtime': PropertyType(True, is_str()),
        'CodeUri': PropertyType(False, one_of(is_str(), is_type(dict))),
        'InlineCode': PropertyType(False, one_of(is_str(), is_type(dict))),
        'DeadLetterQueue': PropertyType(False, is_type(dict)),
        'Description': PropertyType(False, is_str()),
        'MemorySize': PropertyType(False, is_type(int)),
        'Timeout': PropertyType(False, is_type(int)),
        'VpcConfig': PropertyType(False, is_type(dict)),
        'Role': PropertyType(False, is_str()),
        # FIX: the list_of(one_of(...)) previously contained is_type(dict) twice;
        # the duplicate validator was redundant and has been removed.
        'Policies': PropertyType(
            False,
            one_of(is_str(), list_of(one_of(is_str(), is_type(dict))))),
        'PermissionsBoundary': PropertyType(False, is_str()),
        'Environment': PropertyType(False, dict_of(is_str(), is_type(dict))),
        'Events': PropertyType(False, dict_of(is_str(), is_type(dict))),
        'Tags': PropertyType(False, is_type(dict)),
        'Tracing': PropertyType(False, one_of(is_type(dict), is_str())),
        'KmsKeyArn': PropertyType(False, one_of(is_type(dict), is_str())),
        'DeploymentPreference': PropertyType(False, is_type(dict)),
        'ReservedConcurrentExecutions': PropertyType(False, any_type()),
        'Layers': PropertyType(False, list_of(one_of(is_str(), is_type(dict)))),

        # Intrinsic functions in value of Alias property are not supported, yet
        # FIX: one_of(is_str()) with a single alternative simplified to is_str().
        'AutoPublishAlias': PropertyType(False, is_str()),
        'VersionDescription': PropertyType(False, is_str())
    }
    event_resolver = ResourceTypeResolver(
        samtranslator.model.eventsources,
        samtranslator.model.eventsources.pull,
        samtranslator.model.eventsources.push,
        samtranslator.model.eventsources.cloudwatchlogs)

    # DeadLetterQueue: IAM action SAM must grant for each supported DLQ type
    dead_letter_queue_policy_actions = {
        'SQS': 'sqs:SendMessage',
        'SNS': 'sns:Publish'
    }

    # Customers can refer to the following properties of SAM function
    referable_properties = {
        "Alias": LambdaAlias.resource_type,
        "Version": LambdaVersion.resource_type,
    }

    def resources_to_link(self, resources):
        """Returns the event resources each of this function's events needs to link against.

        :raises InvalidResourceException: if any event is invalid
        """
        try:
            return {
                'event_resources': self._event_resources_to_link(resources)
            }
        except InvalidEventException as e:
            raise InvalidResourceException(self.logical_id, e.message)

    def to_cloudformation(self, **kwargs):
        """Returns the Lambda function, role, and event resources to which this SAM Function corresponds.

        :param dict kwargs: already-converted resources that may need to be modified when converting this \
        macro to pure CloudFormation
        :returns: a list of vanilla CloudFormation Resources, to which this Function expands
        :rtype: list
        """
        resources = []
        intrinsics_resolver = kwargs["intrinsics_resolver"]
        mappings_resolver = kwargs.get("mappings_resolver", None)

        if self.DeadLetterQueue:
            self._validate_dlq()

        lambda_function = self._construct_lambda_function()
        resources.append(lambda_function)

        lambda_alias = None
        if self.AutoPublishAlias:
            alias_name = self._get_resolved_alias_name(
                "AutoPublishAlias", self.AutoPublishAlias, intrinsics_resolver)
            lambda_version = self._construct_version(
                lambda_function, intrinsics_resolver=intrinsics_resolver)
            lambda_alias = self._construct_alias(alias_name, lambda_function,
                                                lambda_version)
            resources.append(lambda_version)
            resources.append(lambda_alias)

        if self.DeploymentPreference:
            self._validate_deployment_preference_and_add_update_policy(
                kwargs.get('deployment_preference_collection', None),
                lambda_alias, intrinsics_resolver, mappings_resolver)

        managed_policy_map = kwargs.get('managed_policy_map', {})
        if not managed_policy_map:
            raise Exception('Managed policy map is empty, but should not be.')

        execution_role = None
        if lambda_function.Role is None:
            # Customer did not supply a role; generate one from Policies etc.
            execution_role = self._construct_role(managed_policy_map)
            lambda_function.Role = execution_role.get_runtime_attr('arn')
            resources.append(execution_role)

        try:
            resources += self._generate_event_resources(
                lambda_function,
                execution_role,
                kwargs['event_resources'],
                lambda_alias=lambda_alias)
        except InvalidEventException as e:
            raise InvalidResourceException(self.logical_id, e.message)

        return resources

    def _get_resolved_alias_name(self, property_name, original_alias_value,
                                 intrinsics_resolver):
        """
        Alias names can be supplied as an intrinsic function. This method tries to extract alias name from a reference
        to a parameter. If it cannot completely resolve (ie. if a complex intrinsic function  was used), then this
        method raises an exception. If alias name is just a plain string, it will return as is

        :param dict or string original_alias_value: Value of Alias property as provided by the customer
        :param samtranslator.intrinsics.resolver.IntrinsicsResolver intrinsics_resolver: Instance of the resolver that
            knows how to resolve parameter references
        :return string: Alias name
        :raises InvalidResourceException: If the value is a complex intrinsic function that cannot be resolved
        """

        # Try to resolve.
        resolved_alias_name = intrinsics_resolver.resolve_parameter_refs(
            original_alias_value)

        if not isinstance(resolved_alias_name, string_types):
            # This is still a dictionary which means we are not able to completely resolve intrinsics
            raise InvalidResourceException(
                self.logical_id,
                "'{}' must be a string or a Ref to a template parameter".
                format(property_name))

        return resolved_alias_name

    def _construct_lambda_function(self):
        """Constructs and returns the Lambda function.

        :returns: the generated Lambda function resource
        :rtype: model.lambda_.LambdaFunction
        """
        lambda_function = LambdaFunction(self.logical_id,
                                         depends_on=self.depends_on,
                                         attributes=self.resource_attributes)

        if self.FunctionName:
            lambda_function.FunctionName = self.FunctionName

        lambda_function.Handler = self.Handler
        lambda_function.Runtime = self.Runtime
        lambda_function.Description = self.Description
        lambda_function.MemorySize = self.MemorySize
        lambda_function.Timeout = self.Timeout
        lambda_function.VpcConfig = self.VpcConfig
        lambda_function.Role = self.Role
        lambda_function.Environment = self.Environment
        lambda_function.Code = self._construct_code_dict()
        lambda_function.KmsKeyArn = self.KmsKeyArn
        lambda_function.ReservedConcurrentExecutions = self.ReservedConcurrentExecutions
        lambda_function.Tags = self._construct_tag_list(self.Tags)
        lambda_function.Layers = self.Layers

        if self.Tracing:
            lambda_function.TracingConfig = {"Mode": self.Tracing}

        if self.DeadLetterQueue:
            lambda_function.DeadLetterConfig = {
                "TargetArn": self.DeadLetterQueue['TargetArn']
            }

        return lambda_function

    def _construct_role(self, managed_policy_map):
        """Constructs a Lambda execution role based on this SAM function's Policies property.

        :returns: the generated IAM Role
        :rtype: model.iam.IAMRole
        """
        execution_role = IAMRole(
            self.logical_id + 'Role',
            attributes=self.get_passthrough_resource_attributes())
        execution_role.AssumeRolePolicyDocument = IAMRolePolicies.lambda_assume_role_policy()

        managed_policy_arns = [
            ArnGenerator.generate_aws_managed_policy_arn(
                'service-role/AWSLambdaBasicExecutionRole')
        ]
        if self.Tracing:
            managed_policy_arns.append(
                ArnGenerator.generate_aws_managed_policy_arn(
                    'AWSXrayWriteOnlyAccess'))

        function_policies = FunctionPolicies(
            {"Policies": self.Policies},
            # No support for policy templates in the "core"
            policy_template_processor=None)
        policy_documents = []

        if self.DeadLetterQueue:
            policy_documents.append(
                IAMRolePolicies.dead_letter_queue_policy(
                    self.dead_letter_queue_policy_actions[
                        self.DeadLetterQueue['Type']],
                    self.DeadLetterQueue['TargetArn']))

        for index, policy_entry in enumerate(function_policies.get()):

            if policy_entry.type is PolicyTypes.POLICY_STATEMENT:

                policy_documents.append({
                    'PolicyName':
                    execution_role.logical_id + 'Policy' + str(index),
                    'PolicyDocument':
                    policy_entry.data
                })
            elif policy_entry.type is PolicyTypes.MANAGED_POLICY:

                # There are three options:
                #   Managed Policy Name (string): Try to convert to Managed Policy ARN
                #   Managed Policy Arn (string): Insert it directly into the list
                #   Intrinsic Function (dict): Insert it directly into the list
                #
                # When you insert into managed_policy_arns list, de-dupe to prevent same ARN from showing up twice
                #

                policy_arn = policy_entry.data
                if isinstance(policy_entry.data, string_types
                              ) and policy_entry.data in managed_policy_map:
                    policy_arn = managed_policy_map[policy_entry.data]

                # De-Duplicate managed policy arns before inserting. Mainly useful
                # when customer specifies a managed policy which is already inserted
                # by SAM, such as AWSLambdaBasicExecutionRole
                if policy_arn not in managed_policy_arns:
                    managed_policy_arns.append(policy_arn)
            else:
                # Policy Templates are not supported here in the "core"
                raise InvalidResourceException(
                    self.logical_id,
                    "Policy at index {} in the 'Policies' property is not valid"
                    .format(index))

        execution_role.ManagedPolicyArns = list(managed_policy_arns)
        execution_role.Policies = policy_documents or None
        execution_role.PermissionsBoundary = self.PermissionsBoundary

        return execution_role

    def _validate_dlq(self):
        """Validates this function's DeadLetterQueue property.

        :raises InvalidResourceException: if Type or TargetArn is missing, or Type is unsupported
        """
        # Validate required logical ids
        valid_dlq_types = str(
            list(self.dead_letter_queue_policy_actions.keys()))
        if not self.DeadLetterQueue.get(
                'Type') or not self.DeadLetterQueue.get('TargetArn'):
            # FIX: removed a no-op .format(valid_dlq_types) call — the message
            # contains no placeholder, so the call did nothing.
            raise InvalidResourceException(
                self.logical_id,
                "'DeadLetterQueue' requires Type and TargetArn properties to be specified"
            )

        # Validate required Types
        if not self.DeadLetterQueue[
                'Type'] in self.dead_letter_queue_policy_actions:
            raise InvalidResourceException(
                self.logical_id,
                "'DeadLetterQueue' requires Type of {}".format(
                    valid_dlq_types))

    def _event_resources_to_link(self, resources):
        """Resolves each event in self.Events and collects the resources it needs to link against.

        :raises InvalidEventException: if an event's type cannot be resolved or its dict is malformed
        """
        event_resources = {}
        if self.Events:
            for logical_id, event_dict in self.Events.items():
                try:
                    event_source = self.event_resolver.resolve_resource_type(
                        event_dict).from_dict(self.logical_id + logical_id,
                                              event_dict, logical_id)
                except (TypeError, AttributeError) as e:
                    raise InvalidEventException(logical_id, "{}".format(e))
                event_resources[logical_id] = event_source.resources_to_link(
                    resources)
        return event_resources

    def _generate_event_resources(self,
                                  lambda_function,
                                  execution_role,
                                  event_resources,
                                  lambda_alias=None):
        """Generates and returns the resources associated with this function's events.

        :param model.lambda_.LambdaFunction lambda_function: generated Lambda function
        :param iam.IAMRole execution_role: generated Lambda execution role
        :param event_resources: All the event sources associated with this Lambda function
        :param model.lambda_.LambdaAlias lambda_alias: Optional Lambda Alias resource if we want to connect the
            event sources to this alias

        :returns: a list containing the function's event resources
        :rtype: list
        """
        resources = []
        if self.Events:
            for logical_id, event_dict in self.Events.items():
                try:
                    eventsource = self.event_resolver.resolve_resource_type(
                        event_dict).from_dict(
                            lambda_function.logical_id + logical_id,
                            event_dict, logical_id)
                except TypeError as e:
                    raise InvalidEventException(logical_id, "{}".format(e))

                kwargs = {
                    # When Alias is provided, connect all event sources to the alias and *not* the function
                    'function': lambda_alias or lambda_function,
                    'role': execution_role,
                }

                for name, resource in event_resources[logical_id].items():
                    kwargs[name] = resource

                resources += eventsource.to_cloudformation(**kwargs)

        return resources

    def _construct_code_dict(self):
        """Returns the Code property for the Lambda function from InlineCode or CodeUri.

        :raises InvalidResourceException: if neither InlineCode nor CodeUri is set
        """
        if self.InlineCode:
            return {"ZipFile": self.InlineCode}
        elif self.CodeUri:
            return construct_s3_location_object(self.CodeUri, self.logical_id,
                                                'CodeUri')
        else:
            raise InvalidResourceException(
                self.logical_id, "Either 'InlineCode' or 'CodeUri' must be set")

    def _construct_version(self, function, intrinsics_resolver):
        """Constructs a Lambda Version resource that will be auto-published when CodeUri of the function changes.
        Old versions will not be deleted without a direct reference from the CloudFormation template.

        :param model.lambda_.LambdaFunction function: Lambda function object that is being connected to a version
        :param model.intrinsics.resolver.IntrinsicsResolver intrinsics_resolver: Class that can help resolve
            references to parameters present in CodeUri. It is a common usecase to set S3Key of Code to be a
            template parameter. Need to resolve the values otherwise we will never detect a change in Code dict
        :return: Lambda function Version resource
        """
        code_dict = function.Code
        if not code_dict:
            raise ValueError(
                "Lambda function code must be a valid non-empty dictionary")

        if not intrinsics_resolver:
            raise ValueError(
                "intrinsics_resolver is required for versions creation")

        # Resolve references to template parameters before creating hash. This will *not* resolve all intrinsics
        # because we cannot resolve runtime values like Arn of a resource. For purposes of detecting changes, this
        # is good enough. Here is why:
        #
        # When using intrinsic functions there are two cases when has must change:
        #   - Value of the template parameter changes
        #   - (or) LogicalId of a referenced resource changes ie. !GetAtt NewResource.Arn
        #
        # Later case will already change the hash because some value in the Code dictionary changes. We handle the
        # first case by resolving references to template parameters. It is okay even if these references are
        # present inside another intrinsic such as !Join. The resolver will replace the reference with the parameter's
        # value and keep all other parts of !Join identical. This will still trigger a change in the hash.
        code_dict = intrinsics_resolver.resolve_parameter_refs(code_dict)

        # Construct the LogicalID of Lambda version by appending 10 characters of SHA of CodeUri. This is necessary
        # to trigger creation of a new version every time code location changes. Since logicalId changes, CloudFormation
        # will drop the old version and create a new one for us. We set a DeletionPolicy on the version resource to
        # prevent CloudFormation from actually deleting the underlying version resource
        #
        # SHA Collisions: For purposes of triggering a new update, we are concerned about just the difference previous
        #                 and next hashes. The chances that two subsequent hashes collide is fairly low.
        prefix = "{id}Version".format(id=self.logical_id)
        logical_id = logical_id_generator.LogicalIdGenerator(
            prefix, code_dict).gen()

        attributes = self.get_passthrough_resource_attributes()
        if attributes is None:
            attributes = {}
        attributes["DeletionPolicy"] = "Retain"

        lambda_version = LambdaVersion(logical_id=logical_id,
                                       attributes=attributes)
        lambda_version.FunctionName = function.get_runtime_attr('name')
        lambda_version.Description = self.VersionDescription

        return lambda_version

    def _construct_alias(self, name, function, version):
        """Constructs a Lambda Alias for the given function and pointing to the given version

        :param string name: Name of the alias
        :param model.lambda_.LambdaFunction function: Lambda function object to associate the alias with
        :param model.lambda_.LambdaVersion version: Lambda version object to associate the alias with
        :return: Lambda alias object
        :rtype model.lambda_.LambdaAlias
        """
        if not name:
            raise InvalidResourceException(
                self.logical_id, "Alias name is required to create an alias")

        logical_id = "{id}Alias{suffix}".format(id=function.logical_id,
                                                suffix=name)
        alias = LambdaAlias(
            logical_id=logical_id,
            attributes=self.get_passthrough_resource_attributes())
        alias.Name = name
        alias.FunctionName = function.get_runtime_attr('name')
        alias.FunctionVersion = version.get_runtime_attr("version")

        return alias

    def _validate_deployment_preference_and_add_update_policy(
            self, deployment_preference_collection, lambda_alias,
            intrinsics_resolver, mappings_resolver):
        """Validates DeploymentPreference (resolving intrinsics in Enabled/Type) and, when enabled,
        attaches the corresponding UpdatePolicy to the function's alias.

        :raises InvalidResourceException: for unresolvable Enabled or missing AutoPublishAlias
        :raises ValueError: when required collaborators are missing
        """
        if 'Enabled' in self.DeploymentPreference:
            self.DeploymentPreference[
                'Enabled'] = intrinsics_resolver.resolve_parameter_refs(
                    self.DeploymentPreference['Enabled'])
            if isinstance(self.DeploymentPreference['Enabled'], dict):
                raise InvalidResourceException(
                    self.logical_id, "'Enabled' must be a boolean value")

        if 'Type' in self.DeploymentPreference:
            # resolve intrinsics and mappings for Type
            preference_type = self.DeploymentPreference['Type']
            preference_type = intrinsics_resolver.resolve_parameter_refs(
                preference_type)
            preference_type = mappings_resolver.resolve_parameter_refs(
                preference_type)
            self.DeploymentPreference['Type'] = preference_type

        if deployment_preference_collection is None:
            raise ValueError(
                'deployment_preference_collection required for parsing the deployment preference'
            )

        deployment_preference_collection.add(self.logical_id,
                                             self.DeploymentPreference)

        if deployment_preference_collection.get(self.logical_id).enabled:
            if self.AutoPublishAlias is None:
                raise InvalidResourceException(
                    self.logical_id,
                    "'DeploymentPreference' requires AutoPublishAlias property to be specified"
                )
            if lambda_alias is None:
                raise ValueError(
                    'lambda_alias expected for updating it with the appropriate update policy'
                )

            lambda_alias.set_resource_attribute(
                "UpdatePolicy",
                deployment_preference_collection.update_policy(
                    self.logical_id).to_dict())
class SNS(PushEventSource):
    """SNS topic event source for SAM Functions."""
    resource_type = "SNS"
    principal = "sns.amazonaws.com"
    property_types = {
        "Topic": PropertyType(True, is_str()),
        "Region": PropertyType(False, is_str()),
        "FilterPolicy": PropertyType(False, dict_of(is_str(), list_of(one_of(is_str(), is_type(dict))))),
        "SqsSubscription": PropertyType(False, one_of(is_type(bool), is_type(dict))),
    }

    def to_cloudformation(self, **kwargs):
        """Returns the Lambda Permission resource allowing SNS to invoke the function this event source triggers.

        Three wiring modes, selected by SqsSubscription:
          - absent/falsy: SNS -> Lambda directly
          - True: SNS -> newly created SQS queue -> Lambda
          - dict: SNS -> existing SQS queue (QueueArn/QueueUrl) -> Lambda

        :param dict kwargs: no existing resources need to be modified
        :returns: a list of vanilla CloudFormation Resources, to which this SNS event expands
        :rtype: list
        :raises TypeError: if the 'function' keyword argument is missing
        :raises InvalidEventException: if SqsSubscription dict lacks QueueArn or QueueUrl
        """
        function = kwargs.get("function")
        role = kwargs.get("role")

        if not function:
            raise TypeError("Missing required keyword argument: function")

        # SNS -> Lambda
        if not self.SqsSubscription:
            subscription = self._inject_subscription(
                "lambda",
                function.get_runtime_attr("arn"),
                self.Topic,
                self.Region,
                self.FilterPolicy,
                function.resource_attributes,
            )
            return [self._construct_permission(function, source_arn=self.Topic), subscription]

        # SNS -> SQS(Create New) -> Lambda
        if isinstance(self.SqsSubscription, bool):
            resources = []

            queue = self._inject_sqs_queue()
            queue_arn = queue.get_runtime_attr("arn")
            queue_url = queue.get_runtime_attr("queue_url")

            queue_policy = self._inject_sqs_queue_policy(self.Topic, queue_arn, queue_url)
            subscription = self._inject_subscription(
                "sqs", queue_arn, self.Topic, self.Region, self.FilterPolicy, function.resource_attributes
            )
            event_source = self._inject_sqs_event_source_mapping(function, role, queue_arn)

            resources = resources + event_source
            resources.append(queue)
            resources.append(queue_policy)
            resources.append(subscription)
            return resources

        # SNS -> SQS(Existing) -> Lambda
        resources = []

        queue_arn = self.SqsSubscription.get("QueueArn", None)
        queue_url = self.SqsSubscription.get("QueueUrl", None)
        if not queue_arn or not queue_url:
            raise InvalidEventException(self.relative_id, "No QueueARN or QueueURL provided.")

        queue_policy_logical_id = self.SqsSubscription.get("QueuePolicyLogicalId", None)
        batch_size = self.SqsSubscription.get("BatchSize", None)
        enabled = self.SqsSubscription.get("Enabled", None)

        queue_policy = self._inject_sqs_queue_policy(self.Topic, queue_arn, queue_url, queue_policy_logical_id)
        subscription = self._inject_subscription(
            "sqs", queue_arn, self.Topic, self.Region, self.FilterPolicy, function.resource_attributes
        )
        event_source = self._inject_sqs_event_source_mapping(function, role, queue_arn, batch_size, enabled)

        resources = resources + event_source
        resources.append(queue_policy)
        resources.append(subscription)
        return resources

    def _inject_subscription(self, protocol, endpoint, topic, region, filterPolicy, resource_attributes):
        """Builds the SNS Subscription resource for the given protocol/endpoint."""
        subscription = SNSSubscription(self.logical_id)
        subscription.Protocol = protocol
        subscription.Endpoint = endpoint
        subscription.TopicArn = topic

        if region is not None:
            subscription.Region = region

        # Propagate the function's Condition to the subscription.
        if CONDITION in resource_attributes:
            subscription.set_resource_attribute(CONDITION, resource_attributes[CONDITION])

        if filterPolicy is not None:
            subscription.FilterPolicy = filterPolicy

        return subscription

    def _inject_sqs_queue(self):
        """Creates the intermediate SQS queue for the SNS -> SQS -> Lambda wiring."""
        return SQSQueue(self.logical_id + "Queue")

    def _inject_sqs_event_source_mapping(self, function, role, queue_arn, batch_size=None, enabled=None):
        """Creates the SQS event source mapping connecting the queue to the function.

        :returns: the resources the SQS event source expands to
        :rtype: list
        """
        event_source = SQS(self.logical_id + "EventSourceMapping")
        event_source.Queue = queue_arn
        event_source.BatchSize = batch_size or 10
        # FIX: previous code used `enabled or True`, which evaluates to True even
        # when the customer explicitly set Enabled: False — making it impossible
        # to create a disabled mapping. Only default to True when unset.
        event_source.Enabled = enabled if enabled is not None else True
        return event_source.to_cloudformation(function=function, role=role)

    def _inject_sqs_queue_policy(self, topic_arn, queue_arn, queue_url, logical_id=None):
        """Builds the queue policy allowing the SNS topic to send messages to the queue."""
        policy = SQSQueuePolicy(logical_id or self.logical_id + "QueuePolicy")
        policy.PolicyDocument = SQSQueuePolicies.sns_topic_send_message_role_policy(topic_arn, queue_arn)
        policy.Queues = [queue_url]
        return policy