def initialize_template(self):
    template = Template()
    ## Apply a transform to use serverless functions.
    template.set_transform("AWS::Serverless-2016-10-31")
    ## Make role for custom resources.
    ## Initialize the resources necessary to make directories.
    ## First get the trust agreement:
    with open('policies/lambda_role_assume_role_doc.json', "r") as f:
        mkdirassume_role_doc = json.load(f)
    ## Base lambda policy
    base_policy = lambda_basepolicy("LambdaBaseRole")
    ## Write permissions for lambda to s3
    write_policy = lambda_writeS3('LambdaWriteS3Policy')
    ##
    template.add_resource(base_policy)
    mkdirrole = Role("S3MakePathRole",
                     AssumeRolePolicyDocument=mkdirassume_role_doc,
                     ManagedPolicyArns=[Ref(base_policy)],
                     Policies=[write_policy])
    mkdirrole_attached = template.add_resource(mkdirrole)
    ## Get the lambda config parameters for initialization of the custom
    ## resource delete function [needs the region].
    lambdaconfig = self.config['Lambda']['LambdaConfig']
    ## Now we need to write a lambda function that actually does the work:
    mkfunction = Function("S3PutObjectFunction",
                          CodeUri="../../protocols",
                          Description="Puts Objects in S3",
                          Handler="helper.handler_mkdir",
                          Environment=Environment(Variables=lambdaconfig),
                          Role=GetAtt(mkdirrole_attached, "Arn"),
                          Runtime="python3.6",
                          Timeout=30)
    mkfunction_attached = template.add_resource(mkfunction)
    delfunction = Function("S3DelObjectFunction",
                           CodeUri="../../protocols",
                           Description="Deletes Objects from S3",
                           Handler="helper.handler_delbucket",
                           Environment=Environment(Variables=lambdaconfig),
                           Role=GetAtt(mkdirrole_attached, "Arn"),
                           Runtime="python3.6",
                           Timeout=30)
    delfunction_attached = template.add_resource(delfunction)
    ## Custom resource to delete.
    delresource = CustomResource('DeleteCustomResource',
                                 ServiceToken=GetAtt(delfunction_attached,
                                                     "Arn"),
                                 BucketName=self.config['PipelineName'],
                                 DependsOn='PipelineMainBucket')
    template.add_resource(delresource)
    ## We can add other custom resource initializations in the future.
    return template, mkfunction_attached, delfunction_attached

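# The two Functions above assume a module "helper" with handlers
# "handler_mkdir" and "handler_delbucket" under ../../protocols; that module
# is not shown here. A minimal sketch of what such handlers could look like,
# using boto3 and the standard cfnresponse contract. The property keys
# (BucketName, Path, DirName) are taken from the custom resource declarations
# in this section; everything else is an assumption.
import boto3
import cfnresponse

s3 = boto3.resource("s3")

def handler_mkdir(event, context):
    ## Create an empty "folder" key in the bucket on Create/Update.
    try:
        props = event["ResourceProperties"]
        if event["RequestType"] in ("Create", "Update"):
            key = "{}/{}/".format(props["Path"].strip("/"), props["DirName"])
            s3.Object(props["BucketName"], key).put(Body=b"")
        cfnresponse.send(event, context, cfnresponse.SUCCESS, {})
    except Exception:
        cfnresponse.send(event, context, cfnresponse.FAILED, {})

def handler_delbucket(event, context):
    ## Empty the bucket on Delete so CloudFormation can remove it.
    try:
        if event["RequestType"] == "Delete":
            bucket = s3.Bucket(event["ResourceProperties"]["BucketName"])
            bucket.objects.all().delete()
        cfnresponse.send(event, context, cfnresponse.SUCCESS, {})
    except Exception:
        cfnresponse.send(event, context, cfnresponse.FAILED, {})
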
def make_folder_custom_resource(self, bucketname, pathname, dirname):
    ## 1. Make a role for the lambda function to take on.
    ## First handle policies:
    ## Assume role policy doc:
    with open('policies/lambda_role_assume_role_doc.json', "r") as f:
        assume_role_doc = json.load(f)
    ## Base lambda policy
    base_policy = lambda_basepolicy("LambdaBaseRole")
    ## Write permissions for lambda to s3
    write_policy = lambda_writeS3('LambdaWriteS3Policy')
    ##
    self.template.add_resource(base_policy)
    role = Role("S3MakePathRole",
                AssumeRolePolicyDocument=assume_role_doc,
                ManagedPolicyArns=[Ref(base_policy)],
                Policies=[write_policy])
    self.template.add_resource(role)
    ## Now we need to write a lambda function that actually does the work:
    function = Function("S3PutObjectFunction",
                        CodeUri="../lambda_repo",
                        Description="Puts Objects in S3",
                        Handler="helper.handler_mkdir",
                        Role=GetAtt(role, "Arn"),
                        Runtime="python3.6",
                        Timeout=30)
    self.template.add_resource(function)
    ## Finally, we declare a custom resource that makes use of this lambda
    ## function.
    foldermake = CustomResource('S3PutObject',
                                ServiceToken=GetAtt(function, "Arn"),
                                BucketName=self.affiliatename,
                                Path=pathname,
                                DirName=dirname)
    self.template.add_resource(foldermake)

    Parameter('pBigIPRouteTableId',
              Description='BIG-IP route table id for HA',
              Type='String'))
pBigIPInterface = template.add_parameter(
    Parameter('pBigIPInterface',
              Description='BIG-IP interface for HA',
              Type='String'))
pBigIPS3Bucket = template.add_parameter(
    Parameter('pBigIPS3Bucket',
              Description='BIG-IP S3 bucket where password is stored',
              Type='String'))
custom = template.add_resource(
    CustomResource(
        'CustomLambdaExec',
        # ServiceToken=GetAtt('install-lambda-test-ha-iapp', 'Arn'),
        ServiceToken=Sub(
            'arn:aws:lambda:${AWS::Region}:${AWS::AccountId}:function:${pPrefix}-ha-iapp'
        ),
        mgmt_ip=Ref('pBigIPMgmt'),
        iapp_url=Ref('piAppUrl'),
        route_table_id=Ref('pBigIPRouteTableId'),
        interface=Ref('pBigIPInterface'),
        s3_bucket=Ref('pBigIPS3Bucket')))

# print(template.to_json())
with open('ha_iapp.json', 'w') as f:
    f.write(template.to_json())

    ))

MyEC2Instance = t.add_resource(
    Instance(
        "MyEC2Instance",
        SecurityGroupIds=GetAtt("AllSecurityGroups", "Value"),
        InstanceType=Ref(InstanceType),
        ImageId=FindInMap(
            "AWSRegionArch2AMI",
            Ref("AWS::Region"),
            FindInMap("AWSInstanceType2Arch", Ref(InstanceType), "Arch")),
    ))

AllSecurityGroups = t.add_resource(
    CustomResource(
        "AllSecurityGroups",
        List=Ref(ExistingSecurityGroups),
        AppendedItem=Ref("SecurityGroup"),
        ServiceToken=GetAtt(AppendItemToListFunction, "Arn"),
    ))

SecurityGroup = t.add_resource(
    SecurityGroup(
        "SecurityGroup",
        SecurityGroupIngress=[{
            "ToPort": "80",
            "IpProtocol": "tcp",
            "CidrIp": "0.0.0.0/0",
            "FromPort": "80"
        }],
        VpcId=Ref(ExistingVPC),
        GroupDescription="Allow HTTP traffic to the host",
        SecurityGroupEgress=[{

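# AppendItemToListFunction is defined outside this excerpt. The custom
# resource above passes it "List" and "AppendedItem" and reads back a "Value"
# attribute via GetAtt, so its handler presumably just concatenates the two
# and returns the result. A hypothetical sketch of that handler:
import cfnresponse

def handler(event, context):
    response_data = {}
    if event["RequestType"] != "Delete":
        props = event["ResourceProperties"]
        # "Value" is what GetAtt("AllSecurityGroups", "Value") resolves to.
        response_data["Value"] = props["List"] + [props["AppendedItem"]]
    cfnresponse.send(event, context, cfnresponse.SUCCESS, response_data)
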
def global_acm_resources(template, acm_properties):
    """
    Add a function to create a global ACM cert and invoke it for each object
    in acm_properties. The objects in acm_properties should contain the same
    properties as when creating a normal ACM certificate.

    :type acm_properties: list
    :rtype: tuple
    """
    with open(module_path + '/lambda_code/global_acm.py', 'r') as file:
        acm_code = file.read()
    write_logs_policy = template.add_resource(ManagedPolicy(
        "WriteLogsPolicy",
        Description='Allow creating log groups and log streams and putting '
                    'log events in them',
        PolicyDocument={
            "Version": "2012-10-17",
            "Statement": [{
                "Effect": "Allow",
                "Action": [
                    "logs:CreateLogGroup",
                    "logs:CreateLogStream",
                    "logs:PutLogEvents"
                ],
                "Resource": "arn:aws:logs:*:*:*"
            }],
        },
    ))
    global_acm_function_role = template.add_resource(Role(
        "GlobalAcmFunctionRole",
        AssumeRolePolicyDocument={
            "Version": "2012-10-17",
            "Statement": [{
                "Effect": "Allow",
                "Principal": {
                    "Service": "lambda.amazonaws.com"
                },
                "Action": "sts:AssumeRole"
            }]
        },
        ManagedPolicyArns=[
            Ref(write_logs_policy),
            'arn:aws:iam::aws:policy/AWSCertificateManagerFullAccess'
        ],
    ))
    global_acm_function = template.add_resource(Function(
        "GlobalAcmFunction",
        Code=Code(
            ZipFile=acm_code,
        ),
        Description=Join('', [Ref(AWS_STACK_NAME), ' query service catalog']),
        Handler='index.lambda_handler',
        Role=GetAtt(global_acm_function_role, 'Arn'),
        Runtime='python2.7',
    ))
    output_resources = []
    for i, properties in enumerate(acm_properties):
        resource = template.add_resource(CustomResource(
            "GlobalAcmNr{}".format(i),
            ServiceToken=GetAtt(global_acm_function, 'Arn'),
            DomainName=properties['DomainName'],
            DomainValidationOptions=properties['DomainValidationOptions'],
            SubjectAlternativeNames=properties['SubjectAlternativeNames'],
        ))
        output_resources.append(resource)
    return tuple(output_resources)

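# Hypothetical usage of global_acm_resources: each dict carries the keys the
# loop above reads (DomainName, DomainValidationOptions,
# SubjectAlternativeNames), mirroring a regular ACM certificate resource.
# The domain names here are placeholders.
template = Template()
certs = global_acm_resources(template, [
    {
        'DomainName': 'example.com',
        'SubjectAlternativeNames': ['www.example.com'],
        'DomainValidationOptions': [{
            'DomainName': 'example.com',
            'ValidationDomain': 'example.com',
        }],
    },
])
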
"DefaultProcessesKey": environment.get_default_processes_key(), "HyP3StackName": Ref("AWS::StackName"), "ParamNameHyP3Username": ssm_hyp3_api_username_param_name, "ParamNameHyP3ApiKey": ssm_hyp3_api_key_param_name } ), "Timeout": 60 } )) db_setup = t.add_resource(CustomResource( "RunDBSetup", ServiceToken=GetAtt(setup_db, "Arn"), # This is to always run the setup_db function on template updates. # Cloudformation only updates resources that change in the template. ForceUpdateId=str(uuid.uuid4()) )) t.add_output(Output( "HyP3Username", Description="HyP3 username", Value=GetAtt(db_setup, 'HyP3Username') )) t.add_output(Output( "HyP3ApiKey", Description="Api key for hyp3 access", Value=GetAtt(db_setup, 'HyP3ApiKey') ))
resources = {
    "PipelineNotificationsTopic": Topic(
        "PipelineNotificationsTopic",
        Condition="IsProdStage",
        DisplayName=Sub("${AppName}-notifications-${AWS::Region}"),
    ),
    "DynamicPipelineCleanupDev": CustomResource(
        "DynamicPipelineCleanupDev",
        Version="1.0",
        ServiceToken=Ref("DynamicPipelineCleanupLambdaArn"),
        RoleArn=Sub("arn:aws:iam::${DevAwsAccountId}:role/CodePipelineServiceRole-${AWS::Region}-${"
                    "DevAwsAccountId}-dev"),
        Region=Ref("AWS::Region"),
        StackName=If("IsProdStage",
                     Sub("${AppName}-dev"),
                     Sub("${AppName}-dev-${Suffix}"))
    ),
    "DynamicPipelineCleanupProd": CustomResource(
        "DynamicPipelineCleanupProd",
        Condition="IsProdStage",
        Version="1.0",
        ServiceToken=Ref("DynamicPipelineCleanupLambdaArn"),
        RoleArn=Sub("arn:aws:iam::${DevAwsAccountId}:role/CodePipelineServiceRole-${AWS::Region}-${"
                    "DevAwsAccountId}-dev"),
        Region=Ref("AWS::Region"),

def create_template():
    template = Template(Description='DNS Validated ACM Certificate Example')
    template.add_version()
    lambda_role = template.add_resource(
        iam.Role(
            'CustomAcmCertificateLambdaExecutionRole',
            AssumeRolePolicyDocument=PolicyDocument(
                Version='2012-10-17',
                Statement=[
                    Statement(Effect=Allow,
                              Action=[Action('sts', 'AssumeRole')],
                              Principal=Principal('Service',
                                                  'lambda.amazonaws.com'))
                ],
            ),
            Path="/",
            ManagedPolicyArns=[
                'arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole',
                'arn:aws:iam::aws:policy/service-role/AWSLambdaRole'
            ],
            Policies=[
                iam.Policy(
                    'CustomAcmCertificateLambdaPolicy',
                    PolicyName=Join('', [
                        Ref(AWS_STACK_NAME),
                        'CustomAcmCertificateLambdaExecutionPolicy'
                    ]),
                    PolicyDocument=PolicyDocument(
                        Version='2012-10-17',
                        Statement=[
                            Statement(
                                Effect=Allow,
                                Action=[
                                    Action('acm', 'AddTagsToCertificate'),
                                    Action('acm', 'DeleteCertificate'),
                                    Action('acm', 'DescribeCertificate'),
                                    Action('acm', 'RemoveTagsFromCertificate'),
                                    Action('acm', 'RequestCertificate')
                                ],
                                Resource=[
                                    Join('', [
                                        'arn:aws:acm:',
                                        Ref(AWS_REGION), ':',
                                        Ref(AWS_ACCOUNT_ID),
                                        ':certificate/*'
                                    ])
                                ]),
                            Statement(
                                Effect=Allow,
                                Action=[Action('acm', 'RequestCertificate')],
                                Resource=['*']),
                            Statement(
                                Effect=Allow,
                                Action=[
                                    Action('route53',
                                           'ChangeResourceRecordSets')
                                ],
                                Resource=['arn:aws:route53:::hostedzone/*'])
                        ]),
                )
            ],
        ))
    with open('certificate_min.py', 'r') as f:
        code = f.read()
    certificate_lambda = template.add_resource(
        awslambda.Function(
            'CustomAcmCertificateLambda',
            Code=awslambda.Code(ZipFile=code),
            Runtime='python3.6',
            Handler='index.handler',
            Timeout=300,
            Role=GetAtt(lambda_role, 'Arn'),
            Description='Cloudformation custom resource for DNS validated '
                        'certificates',
            Metadata={
                'Source': 'https://github.com/dflook/cloudformation-dns-certificate',
                'Version': '1.2.0'
            }))
    certificate = template.add_resource(
        CustomResource('ExampleCertificate',
                       ServiceToken=GetAtt(certificate_lambda, 'Arn'),
                       ValidationMethod='DNS',
                       DomainName='test.example.com',
                       DomainValidationOptions=[{
                           'DomainName': 'test.example.com',
                           'HostedZoneId': 'Z2KZ5YTUFZNC7H'
                       }],
                       Tags=[{
                           'Key': 'Name',
                           'Value': 'Example Certificate'
                       }]))
    template.add_output(
        Output("CertificateARN",
               Value=Ref(certificate),
               Description="The ARN of the example certificate"))
    return template

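# A template module like this is typically rendered from the command line;
# a minimal entry point (not part of the original snippet):
if __name__ == '__main__':
    print(create_template().to_json())
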
def create_template():
    template = Template(
        Description="Stable availability zone discovery utility")
    deployment_id = template.add_parameter(
        Parameter(
            "DeploymentId",
            Type="String",
        ))
    image_uri = template.add_parameter(
        Parameter(
            "ImageUri",
            Type="String",
        ))
    role = template.add_resource(
        Role(
            "Role",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[sts.AssumeRole],
                        Principal=Principal("Service",
                                            "lambda.amazonaws.com"),
                    ),
                ],
            ),
        ))
    function, alias = common.add_versioned_lambda(
        template,
        Ref(deployment_id),
        Function(
            "Function",
            MemorySize=256,
            Timeout=30,
            Role=GetAtt(role, "Arn"),
            PackageType="Image",
            Code=Code(ImageUri=Ref(image_uri)),
            ImageConfig=ImageConfig(Command=[
                Join(":", (handler.__module__, handler.__name__)),
            ]),
        ),
    )
    log_group = template.add_resource(
        LogGroup(
            "LogGroup",
            LogGroupName=Join("/", ["/aws/lambda", Ref(function)]),
            RetentionInDays=common.LOG_RETENTION_DAYS,
        ))
    policy = template.add_resource(
        PolicyType(
            "Policy",
            PolicyName=Ref(role),
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[logs.PutLogEvents, logs.CreateLogStream],
                        Resource=[GetAtt(log_group, "Arn")],
                    ),
                    Statement(
                        Effect=Allow,
                        Action=[ec2.DescribeAvailabilityZones],
                        Resource=["*"],
                    ),
                ],
            ),
            Roles=[Ref(role)],
        ))
    availability_zones = template.add_resource(
        CustomResource(
            "AvailabilityZones",
            ServiceToken=Ref(alias),
            DeploymentId=Ref(deployment_id),
            DependsOn=[policy],
        ))
    template.add_output(
        Output(
            "AvailabilityZones",
            Value=Ref(availability_zones),
        ))
    return template

def create_template():
    template = Template(Description="ECR image tagger utility")
    deployment_id = template.add_parameter(
        Parameter(
            "DeploymentId",
            Type="String",
        ))
    artifact_repository = template.add_parameter(
        Parameter(
            "ArtifactRepository",
            Type="String",
        ))
    image_digest = template.add_parameter(
        Parameter(
            "ImageDigest",
            Type="String",
        ))
    desired_image_tag = template.add_parameter(
        Parameter(
            "DesiredImageTag",
            Type="String",
        ))
    image_uri = template.add_parameter(
        Parameter(
            "ImageUri",
            Type="String",
        ))
    role = template.add_resource(
        Role(
            "Role",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[sts.AssumeRole],
                        Principal=Principal("Service",
                                            "lambda.amazonaws.com"),
                    ),
                ],
            ),
        ))
    function, alias = common.add_versioned_lambda(
        template,
        Ref(deployment_id),
        Function(
            "Function",
            MemorySize=256,
            Timeout=30,
            Role=GetAtt(role, "Arn"),
            PackageType="Image",
            Code=Code(ImageUri=Ref(image_uri)),
            ImageConfig=ImageConfig(Command=[
                Join(":", (handler.__module__, handler.__name__)),
            ]),
        ),
    )
    log_group = template.add_resource(
        LogGroup(
            "LogGroup",
            LogGroupName=Join("/", ["/aws/lambda", Ref(function)]),
            RetentionInDays=common.LOG_RETENTION_DAYS,
        ))
    policy = template.add_resource(
        PolicyType(
            "Policy",
            PolicyName=Ref(role),
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[logs.PutLogEvents, logs.CreateLogStream],
                        Resource=[GetAtt(log_group, "Arn")],
                    ),
                    Statement(
                        Effect=Allow,
                        Action=[ecr.BatchGetImage, ecr.PutImage],  # TODO scope down
                        Resource=["*"],
                    ),
                ],
            ),
            Roles=[Ref(role)],
        ))
    template.add_resource(
        CustomResource(
            "ImageTag",
            ServiceToken=Ref(alias),
            DeploymentId=Ref(deployment_id),
            RepositoryName=Ref(artifact_repository),
            ImageDigest=Ref(image_digest),
            ImageTag=Ref(desired_image_tag),
            DependsOn=[policy],
        ))
    return template

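# common.add_versioned_lambda is not shown in these snippets. Given that both
# callers above pass a template, Ref(deployment_id), and a Function, and use
# the returned alias as a ServiceToken, one plausible shape (a sketch under
# those assumptions, not the actual helper) is a Function plus a Version and
# an Alias, with the deployment id used to force a fresh version per deploy:
from troposphere import GetAtt, Ref
from troposphere.awslambda import Alias, Version

def add_versioned_lambda(template, deployment_id, function):
    function = template.add_resource(function)
    version = template.add_resource(Version(
        function.title + "Version",
        FunctionName=Ref(function),
        # Changing Description replaces the resource, publishing a new version.
        Description=deployment_id,
    ))
    alias = template.add_resource(Alias(
        function.title + "Alias",
        Name="live",
        FunctionName=Ref(function),
        FunctionVersion=GetAtt(version, "Version"),
    ))
    # Ref(alias) resolves to the alias ARN, usable as a ServiceToken.
    return function, alias
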
def _add_processor(self):
    self.ProcessorExecRole = self.add_resource(
        Role(
            "ProcessorExecRole",
            AssumeRolePolicyDocument=Policy(
                Version="2012-10-17",
                Statement=[
                    Statement(Effect=Allow,
                              Action=[AssumeRole],
                              Principal=Principal("Service",
                                                  "lambda.amazonaws.com"))
                ]),
            Path="/",
        ))

    # Common statements for accessing Redis
    self.PushMessageStatements = [
        Statement(Effect=Allow,
                  Action=[
                      elasticache.DescribeCacheClusters,
                  ],
                  Resource=["*"]),
        Statement(Effect=Allow,
                  Action=[
                      ddb.ListTables,
                      ddb.DescribeTable,
                  ],
                  Resource=["*"]),
    ]

    self.ProcessorLambdaPolicy = self.add_resource(
        PolicyType(
            "ProcessorLambdaPolicy",
            PolicyName="ProcessorLambdaRole",
            PolicyDocument=Policy(
                Version="2012-10-17",
                Statement=[
                    Statement(Effect=Allow,
                              Action=[
                                  Action("logs", "CreateLogGroup"),
                                  Action("logs", "CreateLogStream"),
                                  Action("logs", "PutLogEvents"),
                              ],
                              Resource=["arn:aws:logs:*:*:*"]),
                    Statement(Effect=Allow,
                              Action=[
                                  s3.GetBucketLocation,
                                  s3.GetObject,
                                  s3.ListBucket,
                                  s3.ListBucketMultipartUploads,
                              ],
                              Resource=[
                                  Join("", [
                                      "arn:aws:s3:::",
                                      Ref(self.FirehoseLoggingBucket),
                                  ]),
                                  Join("", [
                                      "arn:aws:s3:::",
                                      Ref(self.FirehoseLoggingBucket),
                                      "/*",
                                  ])
                              ]),
                    Statement(Effect=Allow,
                              Action=[
                                  ec2.CreateNetworkInterface,
                                  ec2.DescribeNetworkInterfaces,
                                  ec2.DeleteNetworkInterface,
                              ],
                              Resource=["*"]),
                ] + self.PushMessageStatements),
            Roles=[Ref(self.ProcessorExecRole)],
            DependsOn="ProcessorExecRole"))

    self.ProcessorS3Settings = self.add_resource(
        CustomResource("ProcessorS3Settings",
                       ServiceToken=GetAtt(self.S3WriterCFCustomResource,
                                           "Arn"),
                       Bucket=Ref(self.FirehoseLoggingBucket),
                       Key="processor_settings.json",
                       Content=dict(
                           redis_name=Ref(self.RedisCluster),
                           file_type="json",
                       ),
                       DependsOn=["S3WriterCustomResource"]))

    self.ProcessorLambda = self.add_resource(
        Function("ProcessorLambda",
                 Description="Processes logfiles when they hit S3",
                 Runtime="python2.7",
                 Timeout=300,
                 Handler="lambda.handler",
                 Role=GetAtt(self.ProcessorExecRole, "Arn"),
                 Code=Code(
                     S3Bucket=Ref(self.ProcessorLambdaBucket),
                     S3Key=Ref(self.ProcessorLambdaKey),
                 ),
                 VpcConfig=VPCConfig(
                     SecurityGroupIds=[
                         Ref(self.LambdaProcessorSG),
                     ],
                     SubnetIds=Ref(self.ProcessorSubnetIds),
                 ),
                 DependsOn=[
                     "ProcessorExecRole",
                     "ProcessorS3Settings",
                 ]))

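# The S3Writer custom resource invoked above writes its "Content" property to
# the given Bucket/Key (those property names are taken from the declaration
# above); the handler behind S3WriterCFCustomResource is not shown. A
# hypothetical sketch:
import json
import boto3
import cfnresponse

def handler(event, context):
    props = event["ResourceProperties"]
    try:
        if event["RequestType"] in ("Create", "Update"):
            boto3.client("s3").put_object(
                Bucket=props["Bucket"],
                Key=props["Key"],
                Body=json.dumps(props["Content"]).encode("utf-8"),
            )
        cfnresponse.send(event, context, cfnresponse.SUCCESS, {})
    except Exception:
        cfnresponse.send(event, context, cfnresponse.FAILED, {})
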
def _add_firehose(self):
    self.FirehoseLoggingBucket = self.add_resource(
        Bucket(
            "FirehoseLoggingBucket",
            DeletionPolicy="Retain",
        ))

    self.FirehoseLoggingRole = self.add_resource(
        Role(
            "FirehoseRole",
            AssumeRolePolicyDocument=Policy(
                Version="2012-10-17",
                Statement=[
                    Statement(Effect=Allow,
                              Action=[AssumeRole],
                              Principal=Principal("Service",
                                                  "firehose.amazonaws.com"))
                ]),
            Path="/",
        ))

    self.FirehosePolicy = self.add_resource(
        PolicyType("FirehosePolicy",
                   PolicyName="FirehoseRole",
                   PolicyDocument=Policy(
                       Version="2012-10-17",
                       Statement=[
                           Statement(
                               Effect=Allow,
                               Action=[
                                   s3.AbortMultipartUpload,
                                   s3.GetBucketLocation,
                                   s3.GetObject,
                                   s3.ListBucket,
                                   s3.ListBucketMultipartUploads,
                                   s3.PutObject
                               ],
                               Resource=[
                                   Join("", [
                                       "arn:aws:s3:::",
                                       Ref(self.FirehoseLoggingBucket),
                                   ]),
                                   Join("", [
                                       "arn:aws:s3:::",
                                       Ref(self.FirehoseLoggingBucket),
                                       "/*",
                                   ])
                               ]),
                       ]),
                   Roles=[Ref(self.FirehoseLoggingRole)]))

    self.FirehoseLogstream = self.add_resource(
        CustomResource("FirehoseLogStream",
                       ServiceToken=GetAtt(self.FirehoseCFCustomResource,
                                           "Arn"),
                       S3DestinationConfiguration=dict(
                           RoleARN=GetAtt(self.FirehoseLoggingRole, "Arn"),
                           BucketARN=Join("", [
                               "arn:aws:s3:::",
                               Ref(self.FirehoseLoggingBucket),
                           ]),
                           BufferingHints=dict(
                               SizeInMBs=5,
                               IntervalInSeconds=60,
                           )),
                       DependsOn=["FirehosePolicy"]))

    self._template.add_output([
        Output(
            "FirehoseLoggingBucket",
            Description="Firehose Logging Bucket",
            Value=Ref(self.FirehoseLoggingBucket),
        )
    ])

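# The delivery stream here is created through a custom resource
# (FirehoseCFCustomResource, defined elsewhere) rather than a native
# CloudFormation type. A rough sketch of such a handler, with names assumed
# and error handling trimmed, using boto3 and cfnresponse:
import boto3
import cfnresponse

firehose = boto3.client("firehose")

def handler(event, context):
    stream_name = event["LogicalResourceId"]
    try:
        if event["RequestType"] == "Create":
            config = dict(event["ResourceProperties"]
                          ["S3DestinationConfiguration"])
            # CloudFormation passes custom resource property scalars as
            # strings, so numeric buffering hints need coercing back to int.
            hints = config.get("BufferingHints", {})
            config["BufferingHints"] = {k: int(v) for k, v in hints.items()}
            firehose.create_delivery_stream(
                DeliveryStreamName=stream_name,
                S3DestinationConfiguration=config,
            )
        elif event["RequestType"] == "Delete":
            firehose.delete_delivery_stream(DeliveryStreamName=stream_name)
        cfnresponse.send(event, context, cfnresponse.SUCCESS, {},
                         physicalResourceId=stream_name)
    except Exception:
        cfnresponse.send(event, context, cfnresponse.FAILED, {})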