def lambda_function(
    *,
    bucket_name: Parameter,
    workers_key: Parameter,
    name: str,
    role: iam.Role,
    runtime: str,
    namespace: str,
    module: str,
    memory_size: int,
    timeout: int,
    tags: Tags,
    source_bucket: Optional[Parameter] = None,
) -> awslambda.Function:
    """Build a troposphere Lambda Function resource for an Accretion worker.

    The handler is derived from the worker *namespace* and *module*; when no
    explicit *source_bucket* is given, the artifact bucket defaults to
    *bucket_name*.
    """
    code_bucket = bucket_name if source_bucket is None else source_bucket
    handler_name = f"accretion_workers.{namespace}.{module}.lambda_handler"
    return awslambda.Function(
        name,
        Role=role.get_att("Arn"),
        Code=awslambda.Code(S3Bucket=code_bucket.ref(), S3Key=workers_key.ref()),
        Handler=handler_name,
        Environment=_lambda_environment(bucket_name),
        Runtime=runtime,
        MemorySize=memory_size,
        Timeout=timeout,
        Tags=tags,
    )
def lambda_resources(self, code_bucket: str, code_key: str) -> list[AWSObject]:
    """Return resource associated with the construct.

    :param code_bucket: bucket in which the lambda code is located
    :param code_key: location of the code in the bucket
    """
    code_args = {"S3Bucket": code_bucket, "S3Key": code_key}
    if self.code_version is not None:
        code_args["S3ObjectVersion"] = str(self.code_version)

    # Accept either a Role construct or a plain role ARN/reference.
    role_value = self.role.arn if isinstance(self.role, Role) else self.role

    params = {
        "Code": awslambda.Code(**code_args),
        "Timeout": self.timeout,
        "Description": self.description,
        "Role": role_value,
        "FunctionName": self.name,
    }
    # Optional properties are emitted only when explicitly configured.
    for prop_name, prop_value in (
        ("Runtime", self.runtime),
        ("Handler", self.handler),
        ("MemorySize", self.memory_size),
    ):
        if prop_value is not None:
            params[prop_name] = prop_value

    return [awslambda.Function(name_to_id(self.name), **params)]
def add_cloudfront_directory_index_rewrite(self, role):
    # type: (iam.Role) -> awslambda.Function
    """Add an index CloudFront directory index rewrite lambda function to the template.

    Keyword Args:
        role (dict): The index rewrite role resource

    Return:
        dict: The CloudFront directory index rewrite lambda function resource
    """
    variables = self.get_variables()
    template_path = os.path.join(
        os.path.dirname(__file__),
        'templates/cf_directory_index_rewrite.template.js')
    # Inline the JS source, substituting the configured directory index.
    with open(template_path) as handle:
        code_str = handle.read().replace(
            '{{RewriteDirectoryIndex}}',
            variables["RewriteDirectoryIndex"])
    rewrite_function = awslambda.Function(
        'CFDirectoryIndexRewrite',
        Code=awslambda.Code(ZipFile=code_str),
        Description='Rewrites CF directory HTTP requests to default page',
        Handler='index.handler',
        Role=role.get_att('Arn'),
        Runtime='nodejs10.x')
    return self.template.add_resource(rewrite_function)
def setup(self, key, name):
    """Populate this Lambda function resource from its configuration mapping.

    :param key: configuration mapping for the function
    :param name: logical name used to derive FunctionName and the role ref
    """
    import_name = key['ImportName'] if 'ImportName' in key else name
    if 'Code' not in key:
        self.Code = lbd.Code()
        try:
            self.Code.ZipFile = Join('', import_lambda(import_name))
        except Exception:
            # Fix: was a bare `except:` which also swallowed SystemExit /
            # KeyboardInterrupt. Fall back to a stub handler that tells the
            # user how to supply code.
            self.Code.ZipFile = (
                'print("Use Code parameter in yaml '
                f'or create file lib/lambas/{import_name}.code '
                'with lambda code to execute.")')
    auto_get_props(self, key, recurse=True)
    self.FunctionName = Sub('${AWS::StackName}-${EnvRole}-%s' % name)
    if 'Handler' not in key:
        self.Handler = 'index.lambda_handler'
    self.Role = GetAtt(f'RoleLambda{name}', 'Arn')
    if all(k in key for k in ['SecurityGroupIds', 'SubnetIds']):
        self.VpcConfig = lbd.VPCConfig('')
        auto_get_props(self.VpcConfig, key, mapname=self.title)
    # Variables - skip if AtEdge - always set Env, EnvRole.
    # Fix: the original probed key['AtEdge'] inside try/except (with an
    # unused exception variable); a membership test says the same thing.
    if 'AtEdge' not in key:
        self.Environment = lbd.Environment(Variables={
            'Env': Ref('EnvShort'),
            'EnvRole': Ref('EnvRole'),
        })
        if 'Variables' in key:
            self.Environment.Variables.update({
                varname: get_endvalue(f'{self.title}Variables{varname}')
                for varname in key['Variables']
            })
def lambda_resources(
    self,
    code_bucket: Optional[str] = None,
    code_key: Optional[str] = None,
    image_uri: Optional[str] = None,
) -> list[AWSObject]:
    """Return resource associated with the construct.

    :param code_bucket: bucket in which the lambda code is located
    :param code_key: location of the code in the bucket
    :param image_uri: URI of a container image in the Amazon ECR registry
    :raises ValueError: if no code source (S3 object, zipfile, or image)
        can be determined
    """
    params: dict[str, Any] = {}
    # Code source priority: S3 bucket/key, then inline zipfile, then image.
    if code_bucket is not None and code_key is not None:
        code_params = {"S3Bucket": code_bucket, "S3Key": code_key}
        if self.code_version is not None:
            code_params["S3ObjectVersion"] = str(self.code_version)
    elif self.code_zipfile is not None:
        code_params = {"ZipFile": self.code_zipfile}
    elif image_uri:
        code_params = {"ImageUri": image_uri}
        params["PackageType"] = "Image"
    else:
        # Fix: previously fell through with `code_params` unbound, raising a
        # confusing UnboundLocalError; fail with an explicit message instead.
        raise ValueError(
            "no code source for lambda function: provide code_bucket and "
            "code_key, an image_uri, or set code_zipfile"
        )

    if isinstance(self.role, Role):
        role = self.role.arn
    else:
        role = self.role

    params.update(
        {
            "Code": awslambda.Code(**code_params),
            "Timeout": self.timeout,
            "Description": self.description,
            "Role": role,
            "FunctionName": self.name,
        }
    )
    # Optional properties are emitted only when explicitly configured.
    if self.runtime is not None:
        params["Runtime"] = self.runtime
    if self.handler is not None:
        params["Handler"] = self.handler
    if self.memory_size is not None:
        params["MemorySize"] = self.memory_size
    if self.ephemeral_storage_size is not None:
        params["EphemeralStorage"] = awslambda.EphemeralStorage(
            Size=self.ephemeral_storage_size
        )
    return [awslambda.Function(name_to_id(self.name), **params)]
def build() -> StateMachine:
    """Assemble a minimal hello-world state machine backed by one Lambda."""
    hello_fn = awslambda.Function(
        "HelloWorldFunction",
        Code=awslambda.Code(ZipFile="foo bar"))
    machine = StateMachine(
        Comment="A simple minimal example of the States language")
    machine.start_with(Task("Hello World", Resource=hello_fn)).end()
    return machine
def lambda_from_file(python_file):
    """
    Reads a python file and returns a awslambda.Code object
    :param python_file:
    :return:
    """
    with open(python_file, 'r') as source:
        code_lines = source.read().splitlines()
    # Re-join with explicit newlines so the template carries the full script.
    return awslambda.Code(ZipFile=Join('\n', code_lines))
def csvimport_function(csvimport_lambda_role, lambda_environment_dict,
                       s3_bucket, s3_key_value, csvimport_s3_versionid_value):
    """Build the csvimport Lambda function resource (python3.6, 5-min timeout)."""
    code_location = awslambda.Code(
        S3Bucket=Ref(s3_bucket),
        S3Key=Join('', [s3_key_value, '/csvimport.zip']),
        S3ObjectVersion=csvimport_s3_versionid_value)
    return awslambda.Function(
        'CsvimportFunction',
        Code=code_location,
        Environment=awslambda.Environment(Variables=lambda_environment_dict),
        Handler='csvimport.lambda_handler',
        Role=GetAtt(csvimport_lambda_role, 'Arn'),
        Runtime='python3.6',
        Timeout=300)
def generate(template):
    """Add the device-authorizer Lambda, its role, and outputs to *template*.

    :param template: troposphere Template mutated in place
    :return: the awslambda.Function resource that was added
    """
    # Artifact location is parameterized so deployments can pin bucket,
    # key, and S3 object version at stack-creation time.
    artifact_bucket = template.add_parameter(
        Parameter('AuthorizerArtifactBucket', Type='String'))
    artifact_name = template.add_parameter(
        Parameter('AuthorizerArtifactName', Type='String'))
    artifact_version = template.add_parameter(
        Parameter('AuthorizerArtifactVersion', Type='String'))
    # Execution role assumable by the Lambda service, with basic logging only.
    fnrole = template.add_resource(
        iam.Role(
            'DeviceAuthorizerFnRole',
            RoleName='DeviceAuthorizerFnRole',
            ManagedPolicyArns=[
                "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole",
            ],
            AssumeRolePolicyDocument=awacs.aws.PolicyDocument(
                Version='2012-10-17',
                Statement=[
                    awacs.aws.Statement(
                        Effect=awacs.aws.Allow,
                        Action=[awacs.sts.AssumeRole],
                        Principal=awacs.aws.Principal("Service",
                                                      "lambda.amazonaws.com"),
                    )
                ])))
    lambdafn = template.add_resource(
        awslambda.Function(
            'DeviceAuthorizerFn',
            FunctionName='DeviceAuthorizerFn',
            Runtime='nodejs10.x',
            MemorySize=128,
            Timeout=10,
            Handler='index.handler',
            Role=GetAtt(fnrole, "Arn"),
            Code=awslambda.Code(
                S3Bucket=Ref(artifact_bucket),
                S3Key=Ref(artifact_name),
                S3ObjectVersion=Ref(artifact_version),
            )))
    # Export both the function reference and its ARN for cross-stack use.
    template.add_output(
        Output('DeviceAuthorizerFn',
               Export=Export('DeviceAuthorizerFn'),
               Value=Ref(lambdafn)))
    template.add_output(
        Output('DeviceAuthorizerFnArn',
               Export=Export('DeviceAuthorizerFnArn'),
               Value=GetAtt(lambdafn, "Arn")))
    return lambdafn
def showeach_function(showeach_lambda_role, lambda_environment_dict, s3_bucket,
                      s3_key_value, showeach_s3_versionid_value):
    """Build the showeach Lambda function resource (python3.6, 5-min timeout)."""
    code_location = awslambda.Code(
        S3Bucket=Ref(s3_bucket),
        S3Key=Join('', [s3_key_value, '/showeach.zip']),
        S3ObjectVersion=showeach_s3_versionid_value)
    return awslambda.Function(
        'ShoweachFunction',
        Code=code_location,
        Environment=awslambda.Environment(Variables=lambda_environment_dict),
        Handler='showeach.lambda_handler',
        Role=GetAtt(showeach_lambda_role, 'Arn'),
        Runtime='python3.6',
        Timeout=300)
def build_lambda_function(name: str, image_uri: str) -> awslambda.Function:
    """Create a container-image Lambda function resource for *name*.

    The execution role is resolved via GetAtt on the matching assume-role
    resource; timeout and memory come from module-level constants.
    """
    return awslambda.Function(
        lambda_function_name(name),
        Code=awslambda.Code(ImageUri=image_uri),
        PackageType="Image",
        Timeout=LAMBDA_TIMEOUT,
        MemorySize=LAMBDA_MEMORY_SIZE,
        Role=GetAtt(iam_assume_role_name(name), "Arn"),
    )
def _get_cloudformation_template_with_test_setup(config, task_kwargs):
    """
    A helper function for all of the tests that use the deployer cloudformation template

    Args:
        config (Config): The config object to use with the Chili-Pepper App
        task_kwargs (Dict[str, Object]): Kwargs to pass to app.tasks
    """
    app = ChiliPepper().create_app(app_name="test_get_cloudformation_template",
                                   config=config)
    test_bucket_name = "my_test_bucket"
    app.conf["aws"]["bucket_name"] = test_bucket_name
    # Default the runtime only when the supplied config did not set one.
    if "runtime" not in app.conf["aws"]:
        app.conf["aws"]["runtime"] = "python3.7"

    # Registering the task is what makes the deployer emit a Function resource.
    @app.task(**task_kwargs)
    def say_hello(event, context):
        # moto doesn't handle returns from lambda functions :(
        print("Hello!")

    deployer = Deployer(app=app)
    code_argument = awslambda.Code(
        S3Bucket=test_bucket_name,
        S3Key="{app_name}DeploymentPackage".format(app_name=app.app_name))
    cloudformation_template = deployer._get_cloudformation_template(
        code_argument)
    template_resources = cloudformation_template.resources
    # The generated template must contain the execution role...
    function_role = template_resources["FunctionRole"]
    assert type(function_role) == iam.Role
    assert function_role.ManagedPolicyArns == [
        "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole"
    ]
    assert function_role.AssumeRolePolicyDocument == awacs.aws.Policy(
        Statement=[
            awacs.aws.Statement(Effect=awacs.aws.Allow,
                                Action=[awacs.sts.AssumeRole],
                                Principal=awacs.aws.Principal(
                                    "Service", ["lambda.amazonaws.com"]))
        ])
    # ...and the task's Lambda function wired to the provided Code argument.
    say_hello_task = template_resources["TestsUnitTestDeployerSayHello"]
    assert type(say_hello_task) == awslambda.Function
    assert say_hello_task.Code == code_argument
    assert say_hello_task.Handler == "tests.unit.test_deployer.say_hello"
    # Exactly the role and the function — nothing else.
    assert len(template_resources) == 2
    return cloudformation_template
def build_subscription(self, t, topic):
    """Create a Slack-notification Lambda subscription for an SNS topic.

    Adds an execution role, a placeholder Lambda, and an invoke permission
    to template *t*; returns the ("lambda", arn) tuple used for the topic
    subscription.
    """
    policy = t.add_resource(
        iam.Role(
            "{}SlackSNSRole".format(self.name),
            AssumeRolePolicyDocument=aws.Policy(Statement=[
                aws.Statement(Action=[awacs.sts.AssumeRole],
                              Effect=aws.Allow,
                              Principal=aws.Principal(
                                  "Service", ["lambda.amazonaws.com"]))
            ]),
            Path="/",
            Policies=[
                iam.Policy(
                    PolicyName='snspublic',
                    PolicyDocument=aws.PolicyDocument(Statement=[
                        aws.Statement(Effect=aws.Allow,
                                      Action=[
                                          awacs.sns.Publish,
                                          awacs.logs.PutLogEvents,
                                          awacs.logs.CreateLogGroup,
                                          awacs.logs.CreateLogStream,
                                      ],
                                      Resource=["*"])
                    ]))
            ],
            ManagedPolicyArns=[
                # "arn:aws:iam::aws:policy/AdministratorAccess"
            ]))
    # NOTE(review): the inline code is a stub ("import sys") — presumably the
    # real handler body is deployed separately; confirm before relying on it.
    code = ["import sys"]
    # make lambda function
    fn = t.add_resource(
        awslambda.Function('{}SlackTopicFN'.format(self.name),
                           Handler='index.handle',
                           Runtime='python3.6',
                           Role=GetAtt(policy, "Arn"),
                           Code=awslambda.Code(ZipFile=Join("", code))))
    # Allow SNS to invoke the function for messages on this topic.
    t.add_resource(
        awslambda.Permission('{}LambdaPerm'.format(self.name),
                             Action='lambda:InvokeFunction',
                             FunctionName=GetAtt(fn, "Arn"),
                             SourceArn=Ref(topic),
                             Principal="sns.amazonaws.com"))
    return ("lambda", GetAtt(fn, "Arn"))
def show_function(show_lambda_role, lambda_environment_dict, showeach_topic,
                  s3_bucket, s3_key_value, show_s3_versionid_value):
    """Build the show Lambda; its environment gains the showeach topic ref.

    Entries in *lambda_environment_dict* take precedence over the
    SHOWEACH_TOPIC default, matching dict.update semantics.
    """
    variables = {
        'SHOWEACH_TOPIC': Ref(showeach_topic),
        **lambda_environment_dict,
    }
    code_location = awslambda.Code(
        S3Bucket=Ref(s3_bucket),
        S3Key=Join('', [s3_key_value, '/show.zip']),
        S3ObjectVersion=show_s3_versionid_value)
    return awslambda.Function(
        'ShowFunction',
        Code=code_location,
        Environment=awslambda.Environment(Variables=variables),
        Handler='show.lambda_handler',
        Role=GetAtt(show_lambda_role, 'Arn'),
        Runtime='python3.6',
        Timeout=300)
def archive_function(archive_lambda_role, lambda_environment_dict,
                     archiveeach_topic, s3_bucket, s3_key_value,
                     archive_s3_versionid_value):
    """Build the archive Lambda; its environment gains the archiveeach topic ref.

    Entries in *lambda_environment_dict* take precedence over the
    ARCHIVEEACH_TOPIC default, matching dict.update semantics.
    """
    variables = {
        'ARCHIVEEACH_TOPIC': Ref(archiveeach_topic),
        **lambda_environment_dict,
    }
    code_location = awslambda.Code(
        S3Bucket=Ref(s3_bucket),
        S3Key=Join('', [s3_key_value, '/archive.zip']),
        S3ObjectVersion=archive_s3_versionid_value)
    return awslambda.Function(
        'ArchiveFunction',
        Code=code_location,
        Environment=awslambda.Environment(Variables=variables),
        Handler='archive.lambda_handler',
        Role=GetAtt(archive_lambda_role, 'Arn'),
        Runtime='python3.6',
        Timeout=300)
def ami_lookup(template):
    """Add an AMI-lookup Lambda and a custom resource that invokes it.

    :param template: template the role/function/custom resource are added to
    :return: the cf.CustomResource that resolves the AMI
    """
    name = 'AMILookup'
    role = make_role(name, template, 'lambda.amazonaws.com', describe_images,
                     logs_writer)
    source_path = os.path.join(os.path.dirname(__file__), 'function.js')
    with open(source_path) as f:
        func = awslambda.Function(
            name,
            template,
            # Fix: readlines() already returns a list — the extra list() was
            # redundant.
            Code=awslambda.Code(ZipFile=Join('', f.readlines())),
            Handler='index.handler',
            Role=GetAtt(role, 'Arn'),
            Runtime='nodejs',
            Timeout=30)
    return cf.CustomResource(
        'AMI',
        template,
        ServiceToken=GetAtt(func, 'Arn'),
        Region=Ref('AWS::Region'),
        Architecture='HVM64')
def lambda_adder(self, nameref, role, condition, **kwargs):
    """Create and register a Lambda function resource from config kwargs.

    :param nameref: logical resource id for the function
    :param role: logical id of the IAM role resource (also a DependsOn)
    :param condition: CFN condition; when true the S3 object version is
        omitted (AWS::NoValue), otherwise the LambdaVersion parameter is used
    :return: the added awslambda.Function, or None when a kwarg is missing
    """
    try:
        lambda_func = awslambda.Function(
            nameref,
            DependsOn=role,
            Code=awslambda.Code(
                S3Bucket=kwargs['s3_bucket'],
                S3Key=kwargs['s3_key'],
                S3ObjectVersion=If(condition,
                                   Ref("AWS::NoValue"),
                                   Ref("LambdaVersion"))),
            MemorySize=kwargs['memory'],
            Role=GetAtt(role, "Arn"),
            Handler=kwargs['handler'],
            Timeout=kwargs['timeout'],
            FunctionName=kwargs['name'],
            Runtime=kwargs['runtime'])
        self.template.add_resource(lambda_func)
        return lambda_func
    except Exception as e:
        # Fix: the original used Python-2 `print e` statements, which are
        # syntax errors under Python 3. Behavior (best-effort, return None)
        # is preserved.
        print(e)
        print("have you set all the values in your config file?")
def _send_deployment_package_to_s3(self, deployment_package_path):
    # type: (Path) -> awslambda.Code
    """Upload the deployment package zip to the app's S3 bucket.

    Returns an awslambda.Code pointing at the uploaded object, pinned to the
    S3 object version returned by put_object.
    """
    # TODO verify that bucket has versioning enabled
    # TODO do not push a new zip if it is identical to the current version
    # TODO do not push a new zip if it is identical to an old version - just use the old version
    s3_key = self._app.app_name + "_deployment_package.zip"
    self._logger.info("Sending deployment package to s3. bucket: '" +
                      self._app.bucket_name + "'. key: '" + s3_key + "'.")
    s3_client = boto3.client("s3")
    s3_response = s3_client.put_object(
        Bucket=self._app.bucket_name,
        Key=s3_key,
        Body=deployment_package_path.read_bytes())
    self._logger.info("Done sending deployment package to s3. bucket: '" +
                      self._app.bucket_name + "'. key: '" + s3_key + "'.")
    # NOTE(review): "VersionId" is only present in the response when the
    # bucket has versioning enabled — see the first TODO above.
    return awslambda.Code(S3Bucket=self._app.bucket_name,
                          S3Key=s3_key,
                          S3ObjectVersion=s3_response["VersionId"])
def add_lambda_function(self):
    """Add the Terraform registry Lambda: role, function, and hashed version."""
    # Execution role: basic logging plus full access to the module bucket
    # and the API-token DynamoDB table.
    role = self.add_resource(
        iam.Role(
            'TerraformRegistryLambdaRole',
            AssumeRolePolicyDocument=PolicyDocument(
                Version='2012-10-17',
                Statement=[
                    Statement(Effect=Allow,
                              Action=[Action('sts', 'AssumeRole')],
                              Principal=Principal('Service',
                                                  'lambda.amazonaws.com'))
                ]),
            ManagedPolicyArns=[
                'arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole'
            ],
            Policies=[
                iam.Policy(
                    PolicyName='Registry',
                    PolicyDocument=PolicyDocument(Statement=[
                        Statement(Effect=Allow,
                                  Action=[Action('s3', '*')],
                                  Resource=[
                                      GetAtt(self._bucket, 'Arn'),
                                      Join('', [
                                          GetAtt(self._bucket, 'Arn'),
                                          '/*'
                                      ])
                                  ]),
                        Statement(Effect=Allow,
                                  Action=[Action('dynamodb', '*')],
                                  Resource=[
                                      GetAtt(self._api_token_table, 'Arn')
                                  ])
                    ]))
            ]))
    lambda_function = self.add_resource(
        awslambda.Function(
            'TerraformRegistry',
            Runtime='python3.7',
            Code=awslambda.Code(S3Bucket=LAMBDA_PACKAGE_BUCKET,
                                S3Key=f'{self._build_version}/lambda.zip'),
            Handler='registry.handler',
            Timeout=300,
            Role=GetAtt(role, 'Arn'),
            Description=Sub('${AWS::StackName} Terraform Registry'),
            Environment=awslambda.Environment(
                Variables={
                    'TerraformModules': Ref(self._bucket),
                    'ApiTokens': Ref(self._api_token_table)
                })))
    # Embed the package hash in the Version's logical id so a code change
    # yields a new resource; Retain keeps superseded versions alive.
    aws_sha256, hex_sha256 = sha256('build/lambda.zip')
    version_name = 'TerraformRegistryVersion' + hex_sha256
    self._lambda_function = self.add_resource(
        awslambda.Version(version_name,
                          CodeSha256=aws_sha256,
                          Description=hex_sha256,
                          FunctionName=Ref(lambda_function),
                          DependsOn=[lambda_function],
                          DeletionPolicy=Retain))
PolicyName="LambdaPolicy", PolicyDocument=aws.Policy(Statement=[ aws.Statement(Effect=aws.Allow, Action=[ aws.Action("logs", "CreateLogGroup"), aws.Action("logs", "CreateLogStream"), aws.Action("logs", "PutLogEvents"), ], Resource=["arn:aws:logs:*:*:*"]) ])) ])) lambda_function = template.add_resource( awslambda.Function("Lambda", Code=awslambda.Code( S3Bucket=Ref(param_lambda_source_bucket), S3Key=Ref(param_lambda_file_name)), Handler="lambda.lambda_handler", MemorySize=128, Role=GetAtt(lambda_role, "Arn"), Runtime="python2.7", Timeout=30)) api = template.add_resource( apigateway.RestApi("API", Description="My API", Name="MyAPI")) api_lambda_permission = template.add_resource( awslambda.Permission("APILambdaPermission", Action="lambda:InvokeFunction", FunctionName=Ref(lambda_function), Principal="apigateway.amazonaws.com",
def create_template(self):
    """Create template (main function called by Stacker)."""
    template = self.template
    variables = self.get_variables()
    template.set_version('2010-09-09')
    template.set_description('Static Website - Bucket and Distribution')

    # Conditions: each optional variable counts as "specified" only when it
    # is neither empty nor the literal string 'undefined'.
    template.add_condition(
        'AcmCertSpecified',
        And(Not(Equals(variables['AcmCertificateArn'].ref, '')),
            Not(Equals(variables['AcmCertificateArn'].ref, 'undefined'))))
    template.add_condition(
        'AliasesSpecified',
        And(Not(Equals(Select(0, variables['Aliases'].ref), '')),
            Not(Equals(Select(0, variables['Aliases'].ref), 'undefined'))))
    template.add_condition(
        'CFLoggingEnabled',
        And(Not(Equals(variables['LogBucketName'].ref, '')),
            Not(Equals(variables['LogBucketName'].ref, 'undefined'))))
    template.add_condition(
        'DirectoryIndexSpecified',
        And(Not(Equals(variables['RewriteDirectoryIndex'].ref, '')),
            Not(Equals(variables['RewriteDirectoryIndex'].ref, 'undefined'))))  # noqa
    template.add_condition(
        'WAFNameSpecified',
        And(Not(Equals(variables['WAFWebACL'].ref, '')),
            Not(Equals(variables['WAFWebACL'].ref, 'undefined'))))

    # Resources
    oai = template.add_resource(
        cloudfront.CloudFrontOriginAccessIdentity(
            'OAI',
            CloudFrontOriginAccessIdentityConfig=cloudfront.CloudFrontOriginAccessIdentityConfig(  # noqa pylint: disable=line-too-long
                Comment='CF access to website')))

    # Private, versioned website bucket; noncurrent versions expire at 90 days.
    bucket = template.add_resource(
        s3.Bucket(
            'Bucket',
            AccessControl=s3.Private,
            LifecycleConfiguration=s3.LifecycleConfiguration(
                Rules=[
                    s3.LifecycleRule(
                        NoncurrentVersionExpirationInDays=90,
                        Status='Enabled')
                ]),
            VersioningConfiguration=s3.VersioningConfiguration(
                Status='Enabled'),
            WebsiteConfiguration=s3.WebsiteConfiguration(
                IndexDocument='index.html',
                ErrorDocument='error.html')))
    template.add_output(Output(
        'BucketName',
        Description='Name of website bucket',
        Value=bucket.ref()))

    # Grant the CloudFront OAI read access to bucket objects.
    allowcfaccess = template.add_resource(
        s3.BucketPolicy(
            'AllowCFAccess',
            Bucket=bucket.ref(),
            PolicyDocument=PolicyDocument(
                Version='2012-10-17',
                Statement=[
                    Statement(
                        Action=[awacs.s3.GetObject],
                        Effect=Allow,
                        Principal=Principal(
                            'CanonicalUser',
                            oai.get_att('S3CanonicalUserId')),
                        Resource=[
                            Join('', [bucket.get_att('Arn'), '/*'])
                        ])
                ])))

    # Role for the Lambda@Edge rewrite function (both lambda and edgelambda
    # service principals are required for Lambda@Edge).
    cfdirectoryindexrewriterole = template.add_resource(
        iam.Role(
            'CFDirectoryIndexRewriteRole',
            Condition='DirectoryIndexSpecified',
            AssumeRolePolicyDocument=PolicyDocument(
                Version='2012-10-17',
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[awacs.sts.AssumeRole],
                        Principal=Principal('Service',
                                            ['lambda.amazonaws.com',
                                             'edgelambda.amazonaws.com']))
                ]),
            ManagedPolicyArns=[
                IAM_ARN_PREFIX + 'AWSLambdaBasicExecutionRole'
            ]))

    # Inline Lambda@Edge function that appends the configured directory
    # index to request URIs ending in '/'.
    cfdirectoryindexrewrite = template.add_resource(
        awslambda.Function(
            'CFDirectoryIndexRewrite',
            Condition='DirectoryIndexSpecified',
            Code=awslambda.Code(
                ZipFile=Join(
                    '',
                    ["'use strict';\n",
                     "exports.handler = (event, context, callback) => {\n",
                     "\n",
                     " // Extract the request from the CloudFront event that is sent to Lambda@Edge\n",  # noqa pylint: disable=line-too-long
                     " var request = event.Records[0].cf.request;\n",
                     " // Extract the URI from the request\n",
                     " var olduri = request.uri;\n",
                     " // Match any '/' that occurs at the end of a URI. Replace it with a default index\n",  # noqa pylint: disable=line-too-long
                     " var newuri = olduri.replace(/\\/$/, '\\/",
                     variables['RewriteDirectoryIndex'].ref,
                     "');\n",  # noqa
                     " // Log the URI as received by CloudFront and the new URI to be used to fetch from origin\n",  # noqa pylint: disable=line-too-long
                     " console.log(\"Old URI: \" + olduri);\n",
                     " console.log(\"New URI: \" + newuri);\n",
                     " // Replace the received URI with the URI that includes the index page\n",  # noqa pylint: disable=line-too-long
                     " request.uri = newuri;\n",
                     " // Return to CloudFront\n",
                     " return callback(null, request);\n",
                     "\n",
                     "};\n"])),
            Description='Rewrites CF directory HTTP requests to default page',  # noqa
            Handler='index.handler',
            Role=cfdirectoryindexrewriterole.get_att('Arn'),
            Runtime='nodejs8.10'))

    # Generating a unique resource name here for the Lambda version, so it
    # updates automatically if the lambda code changes
    code_hash = hashlib.md5(
        str(cfdirectoryindexrewrite.properties['Code'].properties['ZipFile'].to_dict()).encode()  # noqa pylint: disable=line-too-long
    ).hexdigest()
    cfdirectoryindexrewritever = template.add_resource(
        awslambda.Version(
            'CFDirectoryIndexRewriteVer' + code_hash,
            Condition='DirectoryIndexSpecified',
            FunctionName=cfdirectoryindexrewrite.ref()))

    # If custom associations defined, use them
    if variables['lambda_function_associations']:
        lambda_function_associations = [
            cloudfront.LambdaFunctionAssociation(
                EventType=x['type'],
                LambdaFunctionARN=x['arn'])
            for x in variables['lambda_function_associations']
        ]
    else:  # otherwise fallback to pure CFN condition
        lambda_function_associations = If(
            'DirectoryIndexSpecified',
            [cloudfront.LambdaFunctionAssociation(
                EventType='origin-request',
                LambdaFunctionARN=cfdirectoryindexrewritever.ref())],
            NoValue)

    cfdistribution = template.add_resource(
        get_cf_distribution_class()(
            'CFDistribution',
            DependsOn=allowcfaccess.title,
            DistributionConfig=get_cf_distro_conf_class()(
                Aliases=If(
                    'AliasesSpecified',
                    variables['Aliases'].ref,
                    NoValue),
                Origins=[
                    get_cf_origin_class()(
                        DomainName=Join(
                            '.',
                            [bucket.ref(), 's3.amazonaws.com']),
                        S3OriginConfig=get_s3_origin_conf_class()(
                            OriginAccessIdentity=Join(
                                '',
                                ['origin-access-identity/cloudfront/',
                                 oai.ref()])),
                        Id='S3Origin')
                ],
                DefaultCacheBehavior=cloudfront.DefaultCacheBehavior(
                    AllowedMethods=['GET', 'HEAD'],
                    Compress=False,
                    DefaultTTL='86400',
                    ForwardedValues=cloudfront.ForwardedValues(
                        Cookies=cloudfront.Cookies(Forward='none'),
                        QueryString=False,
                    ),
                    LambdaFunctionAssociations=lambda_function_associations,  # noqa
                    TargetOriginId='S3Origin',
                    ViewerProtocolPolicy='redirect-to-https'),
                DefaultRootObject='index.html',
                Logging=If(
                    'CFLoggingEnabled',
                    cloudfront.Logging(
                        Bucket=Join('.',
                                    [variables['LogBucketName'].ref,
                                     's3.amazonaws.com'])),
                    NoValue),
                PriceClass=variables['PriceClass'].ref,
                Enabled=True,
                WebACLId=If(
                    'WAFNameSpecified',
                    variables['WAFWebACL'].ref,
                    NoValue),
                ViewerCertificate=If(
                    'AcmCertSpecified',
                    cloudfront.ViewerCertificate(
                        AcmCertificateArn=variables['AcmCertificateArn'].ref,  # noqa
                        SslSupportMethod='sni-only'),
                    NoValue))))
    template.add_output(Output(
        'CFDistributionId',
        Description='CloudFront distribution ID',
        Value=cfdistribution.ref()))
    template.add_output(
        Output(
            'CFDistributionDomainName',
            Description='CloudFront distribution domain name',
            Value=cfdistribution.get_att('DomainName')))
"Service": ["lambda.amazonaws.com"] } }] }, )) lambda_stream_to_firehose = template.add_resource( awslambda.Function( 'KinesisStreamToFirehose', FunctionName=cfg['lambda_function_name'], Description= 'Lambda function to read kinesis stream and put to firehose', Handler='lambda_function.lambda_handler', Role=GetAtt('ExecutionRole', 'Arn'), Code=awslambda.Code( S3Bucket=cfg['s3_deployment_bucket'], S3Key=cfg['s3_key_lambda'], ), Runtime='python3.6', Timeout=cfg['lambda_timeout'], MemorySize=cfg['lambda_memory_size'], Environment=awslambda.Environment( 'LambdaVars', Variables={ 'DELIVERY_STREAM': cfg['kinesis_delivery_stream_name'], 'ADD_NEWLINE': 'True' }))) add_kinesis_trigger_for_lambda = template.add_resource( awslambda.EventSourceMapping( 'KinesisLambdaTrigger', BatchSize=cfg['lambda_batch_size'],
from awacs.aws import Policy, Allow, Action, Statement, Principal

# Build the "bless" Lambda stack template and print it as JSON.
t = Template()
t.add_description("lambdas")
t.add_version("2010-09-09")

# Name of the access stack whose exports provide the code bucket and role.
access_stack = t.add_parameter(
    Parameter("AccessStack",
              Type="String",
              Description="Access stack name",
              Default="access"))

bless = t.add_resource(
    awslambda.Function("Bless",
                       Code=awslambda.Code(S3Bucket=ImportValue(
                           Sub("${AccessStack}-LambdaBucket")),
                           S3Key="bless_lambda.zip"),
                       FunctionName="blessapi",
                       Handler="lambda_handler.lambda_handler",
                       MemorySize="128",
                       Role=ImportValue(Sub("${AccessStack}-BlessRole")),
                       Runtime="python2.7",
                       Timeout=300))

# Export the function ARN for cross-stack references.
t.add_output(
    Output(bless.title,
           Value=GetAtt(bless, "Arn"),
           Export=Export(Sub("${AWS::StackName}-" + bless.title))))

# Fix: was the Python-2-only `print t.to_json()` statement; the call form
# works on both Python 2 and 3.
print(t.to_json())
"Service": ["lambda.amazonaws.com"] }, "Action": ["sts:AssumeRole"] } }, Policies=[sns_publish_policy, lambda_logging_policy]) with open("lambda_handler.py", 'r') as fin: code = fin.read() lambda_function = awslambda.Function( "LambdaHandler", FunctionName=f"lambda-handler-contact-form-{cdomain}", Description=f"Lambda function for contact form on {domain}", Environment=lambda_environment, Code=awslambda.Code(ZipFile=code), Handler="index.lambda_handler", Runtime="python3.7", MemorySize=128, Role=GetAtt(role, "Arn")) ### API Gateway api = apigatewayv2.Api("HttpApi", Name=f"api-contact-form-{cdomain}", Description=f"API Gateway for contact form on {domain}", ProtocolType="HTTP", Target=GetAtt(lambda_function, "Arn")) api_gateway_lambda_permission = awslambda.Permission( "ApiGatewayLambdaPermission", Action="lambda:InvokeFunction",
lambda_role = t.add_parameter( Parameter('LambdaRole', Type='String', Description='Lambda Role')) bucket_name = t.add_parameter( Parameter('BucketName', Type='String', Description='Lambda Code Bucket')) time_token = t.add_parameter( Parameter('TimeToken', Type='String', Description='Time Token for last upload')) lambda_function = t.add_resource( awslambda.Function("reds", Code=awslambda.Code( S3Bucket=Ref(bucket_name), S3Key=Join( "", ["reds-", Ref(time_token), ".zip"])), Handler="reds.lambda_handler", MemorySize=128, Role=Join('', [ 'arn:aws:iam::', Ref("AWS::AccountId"), ':role/', Ref(lambda_role) ]), Runtime="python2.7", Timeout=30)) t.add_output([ Output( 'LambdaFunction', Description='ReDS Lambda Function',
)) template.set_parameter_label(param_config_bucket, "Lambda Config S3 bucket") cloudformation_tags = template.add_resource( custom_resources.cloudformation.Tags( "CfnTags", Set={ 'ConfigBucket': Ref(param_config_bucket), }, )) validator_lambda = template.add_resource( awslambda.Function( "ValidatorLambda", Code=awslambda.Code( S3Bucket=Ref(param_s3_bucket_name), S3Key=Ref(param_s3_key), ), Runtime='nodejs8.10', Handler='index.handler', Role=Ref(param_role), Tags=GetAtt(cloudformation_tags, 'TagList'), )) validator_version = template.add_resource( custom_resources.awslambda.Version( "ValidatorVersion", FunctionName=Ref(validator_lambda), Dummy=Ref(param_s3_key), # Trigger update on function update )) template.add_output(
aws. Action('kms', 'Create*' ), # Don't ask me why this is needed... aws.Action('kms', 'DescribeKey'), ], Resource=[Ref(kms_key_parameter)]), ), ])) ])) backup_rds_function = template.add_resource( awslambda.Function( 'LambdaBackupRDSFunction', Description='Copies RDS backups to another region', Code=awslambda.Code( S3Bucket=Ref(s3_bucket_parameter), S3Key=Ref(source_zip_parameter), ), Handler='backup-rds.lambda_handler', MemorySize=128, Role=GetAtt(backup_rds_role, 'Arn'), Runtime='python3.6', Timeout=30, Environment=awslambda.Environment( Variables={ 'SOURCE_REGION': Ref(AWS_REGION), 'TARGET_REGION': Ref(target_region_parameter), 'KMS_KEY_ID': Ref(kms_key_parameter), 'CLUSTERS_TO_USE': Ref(clusters_to_use_parameter) }))) # SNS topic for event subscriptions
def register_resources_template(self, template):
    """Register the lambda Function into the troposphere template.

    If this function requires a custom Role, register it too.
    """
    role = self.get_role()
    depends_on = []
    if isinstance(role, iam.Role):
        # Custom role: add it to the template and reference its ARN.
        template.add_resource(role)
        depends_on.append(role.name)
        role = troposphere.GetAtt(role, 'Arn')

    # The S3 object version of the code bundle is supplied at deploy time
    # as a stack parameter.
    template.add_parameter(
        troposphere.Parameter(
            utils.valid_cloudformation_name(self.name, "s3version"),
            Type="String",
        ))

    extra = {}
    if self.settings.get('vpc'):
        vpc = self.project.get_resource('vpc::{}'.format(
            self.settings.get('vpc')))
        # Coerce Ref-typed settings to typed-list parameters so
        # CloudFormation validates them as SG/subnet id lists.
        if isinstance(vpc.settings['security-groups'], troposphere.Ref):
            vpc.settings[
                'security-groups']._type = 'List<AWS::EC2::SecurityGroup::Id>'
        if isinstance(vpc.settings['subnet-ids'], troposphere.Ref):
            vpc.settings['subnet-ids']._type = 'List<AWS::EC2::Subnet::Id>'
        extra['VpcConfig'] = awslambda.VPCConfig(
            SecurityGroupIds=vpc.settings['security-groups'],
            SubnetIds=vpc.settings['subnet-ids'])

    function = template.add_resource(
        awslambda.Function(self.in_project_cf_name,
                           DependsOn=depends_on,
                           Code=awslambda.Code(
                               S3Bucket=troposphere.Ref("CodeBucket"),
                               S3Key=self.get_bucket_key(),
                               S3ObjectVersion=troposphere.Ref(
                                   utils.valid_cloudformation_name(
                                       self.name, "s3version")),
                           ),
                           Description=self.settings.get(
                               'description', ''),
                           Handler=self.get_handler(),
                           MemorySize=self.get_memory(),
                           Role=role,
                           Runtime=self.get_runtime(),
                           Timeout=self.get_timeout(),
                           **extra))

    # Versioning is delegated to the contrib "version" lambda; non-contrib
    # functions reference its ":current" alias instead of its ARN.
    lambda_version = 'lambda:contrib_lambdas:version'
    lambda_ref = troposphere.GetAtt(self.project.reference(lambda_version),
                                    'Arn')
    if not self.in_project_name.startswith('lambda:contrib_lambdas:'):
        lambda_version = '{}:current'.format(lambda_version)
        lambda_ref = troposphere.Ref(
            self.project.reference(lambda_version))

    version = template.add_resource(
        LambdaVersion.create_with(
            utils.valid_cloudformation_name(self.name, "Version"),
            DependsOn=[
                self.project.reference(lambda_version),
                function.name
            ],
            lambda_arn=lambda_ref,
            FunctionName=troposphere.Ref(function),
            S3ObjectVersion=troposphere.Ref(
                utils.valid_cloudformation_name(self.name, "s3version")),
        ))

    # "current" alias always points at the freshly published version.
    alias = template.add_resource(
        awslambda.Alias(
            self.current_alias_cf_name,
            DependsOn=[version.name],
            FunctionName=troposphere.Ref(function),
            FunctionVersion=troposphere.GetAtt(version, "Version"),
            Name="current",
        ))

    if self._get_true_false('cli-output', 't'):
        template.add_output([
            troposphere.Output(
                utils.valid_cloudformation_name("Clioutput",
                                                self.in_project_name),
                Value=troposphere.Ref(alias),
            )
        ])
class CustomResource(AWSCustomObject):
    """Custom CloudFormation resource backed by the test lambda below."""

    resource_type = "Custom::CustomResourceTest"

    # ServiceToken (required): ARN of the Lambda that services this resource.
    props = {'ServiceToken': (str, True)}


# Lambda that implements the custom resource handler.
custom_resource_test_lambda = template.add_resource(
    awslambda.Function(
        'CustomResourceLambda',
        FunctionName='custom_resource',
        Description='Custom Resource Test lambda with Python 3.6',
        Handler='lambda_function.lambda_handler',
        Role=GetAtt('ExecutionRole', 'Arn'),
        Code=awslambda.Code(
            S3Bucket='nicor-dev',
            S3Key='deployments/lambdas/travis_build/custom_resource.zip',
        ),
        Runtime='python3.6',
        Timeout='30',
        MemorySize=128))

custom_resource = template.add_resource(
    CustomResource('CustomResource',
                   DependsOn='CustomResourceLambda',
                   ServiceToken=GetAtt(custom_resource_test_lambda, 'Arn')))

template.add_output([
    Output('LambdaExecutionRole',
           Description='Lambdas Execution role',
           Value=Ref(lambda_execution_role))
])
awacs.ec2.RevokeSecurityGroupIngress, ], Resource=['*']) ])) ])) update_security_groups_function = t.add_resource( awslambda.Function( 'UpdateSecurityGroups', Description='Update Beanstalk Environment SecurityGroup', # code=awslambda.code( # S3Bucket=Sub('aws-lambda-code-${AWS::AccountId}-${AWS::Region}'), # S3Key='update_security_groups.py.zip' # ), Code=awslambda.Code(ZipFile=cfnutil.load_python_lambda( 'lambdas/update_security_groups.py')), Environment=awslambda.Environment( Variables={'REGIONS': Ref(param_regions)}), Handler='index.lambda_handler', Role=GetAtt(update_function_execution_role, 'Arn'), Runtime='python2.7', MemorySize='128', Timeout='300', )) ip_space_changed_topic = 'arn:aws:sns:us-east-1:806199016981:AmazonIpSpaceChanged' update_function_lambda_permission = t.add_resource( awslambda.Permission('LambdaPermission', FunctionName=Ref(update_security_groups_function), Action='lambda:InvokeFunction',