def create_template(self) -> None:
    """Build a serial chain of WaitConditionHandles, optionally capped by a
    WaitCondition that CloudFormation will reject."""
    prefix = "Dummy"
    count = self.variables["Count"]
    for idx in range(count):
        handle = WaitConditionHandle(f"{prefix}{idx}")
        # Chain each handle onto its predecessor so they are created serially.
        if idx > 0:
            handle.DependsOn = f"{prefix}{idx - 1}"
        self.template.add_resource(handle)
        if self.variables["BreakLast"] and idx == count - 1:
            broken = WaitCondition(
                "BrokenWaitCondition",
                Handle=handle.Ref(),
                # Timeout is made deliberately large so CF rejects it
                Timeout=2**32,
                Count=0,
            )
            self.template.add_resource(broken)
    self.add_output("OutputValue", str(self.variables["OutputValue"]))
    self.add_output("WCHCount", str(count))
def create_template(self):
    """Build a serial chain of WaitConditionHandles; append a deliberately
    broken WaitCondition when the BreakLast variable is set."""
    variables = self.get_variables()
    template = self.template
    prefix = "Dummy"
    for idx in range(variables["Count"]):
        handle = WaitConditionHandle(f"{prefix}{idx}")
        # DependsOn the previous handle so resources create one at a time.
        if idx:
            handle.DependsOn = f"{prefix}{idx - 1}"
        template.add_resource(handle)
    self.add_output("OutputValue", str(variables["OutputValue"]))
    self.add_output("WCHCount", str(variables["Count"]))
    if variables["BreakLast"]:
        template.add_resource(
            WaitCondition(
                "BrokenWaitCondition",
                Handle=handle.Ref(),
                # Timeout is made deliberately large so CF rejects it
                Timeout=2**32,
                Count=0,
            )
        )
def test_RequiredProps(self):
    """A WaitCondition built with only its required properties validates."""
    wait_handle = WaitConditionHandle("myWaitHandle")
    condition = WaitCondition(
        "mycondition",
        Handle=Ref(wait_handle),
        Timeout="300",
    )
    condition.validate()
def create_template(self):
    """Add a WaitCondition that CloudFormation is guaranteed to reject."""
    template = self.template
    template.add_resource(WaitConditionHandle("BrokenDummy"))
    broken = WaitCondition(
        "BrokenWaitCondition",
        Handle=Ref("BrokenDummy"),
        # Timeout is made deliberately large so CF rejects it
        Timeout=2 ** 32,
        Count=0,
    )
    template.add_resource(broken)
    template.add_output(Output("DummyId", Value="dummy-1234"))
def create_template(self) -> None:
    """Create template."""
    template = self.template
    template.add_resource(WaitConditionHandle("BrokenDummy"))
    # Timeout is made deliberately large so CF rejects it
    condition = WaitCondition(
        "BrokenWaitCondition",
        Handle=Ref("BrokenDummy"),
        Timeout=2 ** 32,
        Count=0,
    )
    template.add_resource(condition)
    self.add_output("DummyId", "dummy-1234")
def create_template(self):
    """Create a dummy template exposing a string variable, a fixed id, and
    the stack region as outputs."""
    # Renamed from `input`, which shadowed the builtin of the same name.
    string_value = self.get_variables()["StringVariable"]
    self.template.add_resource(WaitConditionHandle("Dummy"))
    self.template.add_output(Output("DummyId", Value="dummy-1234"))
    self.template.add_output(Output("StringOutput", Value=string_value))
    self.template.add_output(Output("Region", Value=Ref("AWS::Region")))
def create_template(self):
    """Create template."""
    # One placeholder handle per requested wait condition: VPC0, VPC1, ...
    count = self.variables["WaitConditionCount"]
    for index in range(count):
        self.template.add_resource(WaitConditionHandle(f"VPC{index}"))
def create_template(self) -> None:
    """Create template."""
    # Single placeholder resource so the stack is non-empty.
    handle = WaitConditionHandle("VPC")
    self.template.add_resource(handle)
def create_template(self) -> None:
    """Create template."""
    template = self.template
    template.add_resource(WaitConditionHandle("Dummy"))
    for title, value in (
        ("DummyId", "dummy-1234"),
        ("Region", Ref("AWS::Region")),
    ):
        template.add_output(Output(title, Value=value))
def sceptre_handler(scepter_user_data):
    """Sceptre entry point: return a minimal one-resource template as JSON."""
    template = Template()
    handle = WaitConditionHandle("WaitConditionHandle")
    template.add_resource(handle)
    return template.to_json()
def main():
    """Build and print a CloudFormation template that launches one EC2
    instance and gates stack completion on a wait-condition signal sent
    from the instance's user-data script."""
    t = Template()
    t.set_description("test instance launch")
    t.set_version("2010-09-09")
    # User-data shell script: curl-PUTs a SUCCESS signal to the wait handle
    # URL (resolved via Ref at deploy time) once the instance boots.
    InstUserData = [
        '#!/usr/bin/env bash\n',
        '\n',
        'set -x\n',
        '\n',
        'my_wait_handle="',
        Ref('InstanceWaitHandle'),
        '"\n',
        'curl -X PUT -H \'Content-Type:\' --data-binary \'{ "Status" : "SUCCESS", "Reason" : "Instance launched", "UniqueId" : "launch001", "Data" : "Instance launched."}\' "${my_wait_handle}"',
        '\n',
        '\n',
    ]
    # --- Parameters ---
    EC2KeyName = t.add_parameter(
        Parameter(
            'EC2KeyName',
            Type="AWS::EC2::KeyPair::KeyName",
            Description=
            "Name of an existing EC2 KeyPair to enable SSH access to the instance.",
            # NOTE(review): "valud" is a typo in this user-facing message.
            ConstraintDescription="REQUIRED: Must be a valud EC2 key pair",
        ))
    OperatingSystem = t.add_parameter(
        Parameter('OperatingSystem',
                  Type="String",
                  Description="Operating System",
                  Default="centos7",
                  AllowedValues=[
                      "alinux2",
                      "centos7",
                      "rhel7",
                  ],
                  ConstraintDescription="Must be: alinux2, centos7, rhel7"))
    myInstanceType = t.add_parameter(
        Parameter(
            'MyInstanceType',
            Type="String",
            Description="Instance type",
            Default="m5.2xlarge",
        ))
    VpcId = t.add_parameter(
        Parameter(
            'VpcId',
            Type="AWS::EC2::VPC::Id",
            Description="VPC Id for this instance",
        ))
    Subnet = t.add_parameter(
        Parameter('Subnet',
                  Type="AWS::EC2::Subnet::Id",
                  Description="Subnet IDs"))
    ExistingSecurityGroup = t.add_parameter(
        Parameter(
            'ExistingSecurityGroup',
            Type="AWS::EC2::SecurityGroup::Id",
            Description=
            "OPTIONAL: Choose an existing Security Group ID, e.g. sg-abcd1234"))
    UsePublicIp = t.add_parameter(
        Parameter(
            'UsePublicIp',
            Type="String",
            Description="Should a public IP address be given to the instance",
            Default="true",
            ConstraintDescription="true/false",
            AllowedValues=["true", "false"]))
    SshAccessCidr = t.add_parameter(
        Parameter(
            'SshAccessCidr',
            Type="String",
            Description="CIDR Block for SSH access, default 127.0.0.1/32",
            Default="127.0.0.1/32",
            AllowedPattern=
            "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})",
            ConstraintDescription="Must be a valid CIDR x.x.x.x/x"))
    # --- IAM / security resources ---
    RootRole = t.add_resource(
        iam.Role("RootRole",
                 AssumeRolePolicyDocument={
                     "Statement": [{
                         "Effect": "Allow",
                         "Principal": {
                             "Service": ["ec2.amazonaws.com"]
                         },
                         "Action": ["sts:AssumeRole"]
                     }]
                 }))
    SshSecurityGroup = t.add_resource(
        SecurityGroup("SshSecurityGroup",
                      VpcId=Ref(VpcId),
                      GroupDescription="SSH Secuirty group",
                      SecurityGroupIngress=[
                          ec2.SecurityGroupRule(
                              IpProtocol="tcp",
                              FromPort="22",
                              ToPort="22",
                              CidrIp=Ref(SshAccessCidr),
                          ),
                      ]))
    RootInstanceProfile = t.add_resource(
        InstanceProfile("RootInstanceProfile", Roles=[Ref(RootRole)]))
    # NOTE(review): `tags` is assigned but never attached to a resource below.
    tags = Tags(Name=Ref("AWS::StackName"))
    # --- EC2 instance ---
    myInstance = t.add_resource(
        ec2.Instance(
            'MyInstance',
            ImageId=FindInMap("AWSRegionAMI", Ref("AWS::Region"),
                              Ref(OperatingSystem)),
            KeyName=Ref(EC2KeyName),
            InstanceType=(Ref(myInstanceType)),
            NetworkInterfaces=[
                NetworkInterfaceProperty(
                    # Only include the optional SG when one was supplied.
                    GroupSet=If(
                        "not_existing_sg",
                        [Ref(SshSecurityGroup)],
                        [Ref(SshSecurityGroup),
                         Ref(ExistingSecurityGroup)]),
                    AssociatePublicIpAddress=Ref(UsePublicIp),
                    DeviceIndex='0',
                    DeleteOnTermination='true',
                    SubnetId=Ref(Subnet))
            ],
            IamInstanceProfile=(Ref(RootInstanceProfile)),
            UserData=Base64(Join('', InstUserData)),
        ))
    # --- Region -> AMI lookup table ---
    t.add_mapping(
        'AWSRegionAMI', {
            "ap-northeast-1": {
                "centos7": "ami-8e8847f1",
                "rhel7": "ami-6b0d5f0d"
            },
            "ap-northeast-2": {
                "centos7": "ami-bf9c36d1",
                "rhel7": "ami-3eee4150"
            },
            "ap-south-1": {
                "centos7": "ami-1780a878",
                "rhel7": "ami-5b673c34"
            },
            "ap-southeast-1": {
                "centos7": "ami-8e0205f2",
                "rhel7": "ami-76144b0a"
            },
            "ap-southeast-2": {
                "centos7": "ami-d8c21dba",
                "rhel7": "ami-67589505"
            },
            "ca-central-1": {
                "centos7": "ami-e802818c",
                "rhel7": "ami-49f0762d"
            },
            "eu-central-1": {
                "centos7": "ami-dd3c0f36",
                "rhel7": "ami-c86c3f23"
            },
            "eu-west-1": {
                "centos7": "ami-3548444c",
                "rhel7": "ami-7c491f05"
            },
            "eu-west-2": {
                "centos7": "ami-00846a67",
                "rhel7": "ami-7c1bfd1b"
            },
            "eu-west-3": {
                "centos7": "ami-262e9f5b",
                "rhel7": "ami-5026902d"
            },
            "sa-east-1": {
                "centos7": "ami-cb5803a7",
                "rhel7": "ami-b0b7e3dc"
            },
            "us-east-1": {
                "centos7": "ami-9887c6e7",
                "rhel7": "ami-6871a115"
            },
            "us-east-2": {
                "centos7": "ami-9c0638f9",
                "rhel7": "ami-03291866"
            },
            "us-west-1": {
                "centos7": "ami-4826c22b",
                "rhel7": "ami-18726478"
            },
            "us-west-2": {
                "centos7": "ami-3ecc8f46",
                "rhel7": "ami-28e07e50"
            }
        })
    # --- Conditions ---
    t.add_condition("not_existing_sg", Equals(Ref(ExistingSecurityGroup), ""))
    t.add_condition("Has_Public_Ip", Equals(Ref(UsePublicIp), "true"))
    # --- Wait condition: stack completes only after user-data signals ---
    mywaithandle = t.add_resource(WaitConditionHandle('InstanceWaitHandle'))
    mywaitcondition = t.add_resource(
        WaitCondition("InstanceWaitCondition",
                      Handle=Ref(mywaithandle),
                      Timeout="1500",
                      DependsOn="MyInstance"))
    # --- Outputs ---
    t.add_output([
        Output("InstanceID", Description="Instance ID", Value=Ref(myInstance))
    ])
    t.add_output(
        [Output("InstancePrivateIP", Value=GetAtt('MyInstance', 'PrivateIp'))])
    t.add_output([
        Output("InstancePublicIP",
               Value=GetAtt('MyInstance', 'PublicIp'),
               Condition="Has_Public_Ip")
    ])
    ##print(t.to_yaml())
    print(t.to_json(indent=2))
def create_template(self):
    """Add a single placeholder WaitConditionHandle named VPC."""
    vpc_handle = WaitConditionHandle("VPC")
    self.template.add_resource(vpc_handle)
def create_template(self):
    """Create two dummy handles with an output added between them."""
    template = self.template
    template.add_resource(WaitConditionHandle("Dummy"))
    template.add_output(Output("DummyId", Value="dummy-1234"))
    template.add_resource(WaitConditionHandle("Dummy2"))
def create_template(self):
    """Create one WaitConditionHandle per requested count."""
    count = self.get_variables()["WaitConditionCount"]
    for index in range(count):
        self.template.add_resource(WaitConditionHandle(f"VPC{index}"))
def create_template(self) -> None:
    """Create template."""
    # Two placeholder handles plus one fixed output.
    for title in ("Dummy", "SecondDummy"):
        self.template.add_resource(WaitConditionHandle(title))
    self.add_output("DummyId", "dummy-1234")
), ], )), Policy(PolicyName="PerforceHelixDescribeStackResource", PolicyDocument=awacs.aws.Policy(Statement=[ awacs.aws.Statement( Effect=awacs.aws.Allow, Action=[ awacs.aws.Action("cloudformation", "DescribeStackResource") ], Resource=["*"], ), ], )), ])) WaitHandle = t.add_resource(WaitConditionHandle("WaitHandle", )) # Resources End # Outputs HostedZoneFQDN = t.add_output( Output( "HostedZoneFQDN", Value=Ref("HostRecord"), Description="FQDN.", Condition="ProdNotify", )) AvailabilityZoneMainInstance = t.add_output( Output( "AvailabilityZoneMainInstance",
else: node_name = AGENT_NODE_NAME_TEMPLATE.format(index=i) # Create an EC2 instance for the {Agent, Control} Node. ec2_instance = ec2.Instance( node_name, ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "FlockerAMI"), InstanceType="m3.large", KeyName=Ref(keyname_param), SecurityGroups=[Ref(instance_sg)], AvailabilityZone=zone, Tags=Tags(Name=node_name)) # WaitCondition and corresponding Handler to signal completion # of {Flocker, Docker, Swarm} configuration on the node. wait_condition_handle = WaitConditionHandle( INFRA_WAIT_HANDLE_TEMPLATE.format(node=node_name)) template.add_resource(wait_condition_handle) wait_condition = WaitCondition( INFRA_WAIT_CONDITION_TEMPLATE.format(node=node_name), Handle=Ref(wait_condition_handle), Timeout="600", ) template.add_resource(wait_condition) user_data = base_user_data[:] user_data += [ 'node_number="{}"\n'.format(i), 'node_name="{}"\n'.format(node_name), 'wait_condition_handle="', Ref(wait_condition_handle), '"\n', ]
t = Template() t.set_description( "Example template showing how the WaitCondition and WaitConditionHandle " "are configured. With this template, the stack will not complete until " "either the WaitCondition timeout occurs, or you manually signal the " "WaitCondition object using the URL created by the WaitConditionHandle. " "You can use CURL or some other equivalent mechanism to signal the " "WaitCondition. To find the URL, use cfn-describe-stack-resources or " "the AWS Management Console to display the PhysicalResourceId of the " "WaitConditionHandle - this is the URL to use to signal. For details of " "the signal request see the AWS CloudFormation User Guide at " "http://docs.amazonwebservices.com/AWSCloudFormation/latest/UserGuide/") mywaithandle = t.add_resource(WaitConditionHandle("myWaitHandle")) mywaitcondition = t.add_resource( WaitCondition( "myWaitCondition", Handle=Ref(mywaithandle), Timeout="300", )) t.add_output([ Output( "ApplicationData", Value=GetAtt(mywaitcondition, "Data"), Description="The data passed back as part of signalling the " "WaitCondition", )
def create_template():
    """Build the full CloudFormation template for a static website served
    from S3 via CloudFront, including log ingestion Lambdas, optional ACM
    certificate creation, and a multi-region StackSet for Lambda@Edge log
    groups. Returns the troposphere Template."""
    template = Template(Description=(
        "Static website hosted with S3 and CloudFront. "
        "https://github.com/schlarpc/overengineered-cloudfront-s3-static-website"
    ))
    partition_config = add_mapping(
        template,
        "PartitionConfig",
        {
            "aws": {
                # the region with the control plane for CloudFront, IAM, Route 53, etc
                "PrimaryRegion": "us-east-1",
                # assume that Lambda@Edge replicates to all default enabled regions, and that
                # future regions will be opt-in. generated with AWS CLI:
                # aws ec2 describe-regions --all-regions --query "Regions[?OptInStatus=='opt-in-not-required'].RegionName|sort(@)"
                "DefaultRegions": [
                    "ap-northeast-1",
                    "ap-northeast-2",
                    "ap-northeast-3",
                    "ap-south-1",
                    "ap-southeast-1",
                    "ap-southeast-2",
                    "ca-central-1",
                    "eu-central-1",
                    "eu-north-1",
                    "eu-west-1",
                    "eu-west-2",
                    "eu-west-3",
                    "sa-east-1",
                    "us-east-1",
                    "us-east-2",
                    "us-west-1",
                    "us-west-2",
                ],
            },
            # this doesn't actually work, because Lambda@Edge isn't supported in aws-cn
            "aws-cn": {
                "PrimaryRegion": "cn-north-1",
                "DefaultRegions": ["cn-north-1", "cn-northwest-1"],
            },
        },
    )
    # --- Parameters ---
    acm_certificate_arn = template.add_parameter(
        Parameter(
            "AcmCertificateArn",
            Description=
            "Existing ACM certificate to use for serving TLS. Overrides HostedZoneId.",
            Type="String",
            AllowedPattern="(arn:[^:]+:acm:[^:]+:[^:]+:certificate/.+|)",
            Default="",
        ))
    hosted_zone_id = template.add_parameter(
        Parameter(
            "HostedZoneId",
            Description=
            "Existing Route 53 zone to use for validating a new TLS certificate.",
            Type="String",
            AllowedPattern="(Z[A-Z0-9]+|)",
            Default="",
        ))
    dns_names = template.add_parameter(
        Parameter(
            "DomainNames",
            Description=
            "Comma-separated list of additional domain names to serve.",
            Type="CommaDelimitedList",
            Default="",
        ))
    tls_protocol_version = template.add_parameter(
        Parameter(
            "TlsProtocolVersion",
            Description=
            "CloudFront TLS security policy; see https://amzn.to/2DR91Xq for details.",
            Type="String",
            Default="TLSv1.2_2019",
        ))
    log_retention_days = template.add_parameter(
        Parameter(
            "LogRetentionDays",
            Description=
            "Days to keep CloudFront, S3, and Lambda logs. 0 means indefinite retention.",
            Type="Number",
            AllowedValues=[0] + CLOUDWATCH_LOGS_RETENTION_OPTIONS,
            Default=365,
        ))
    default_ttl_seconds = template.add_parameter(
        Parameter(
            "DefaultTtlSeconds",
            Description="Cache time-to-live when not set by S3 object headers.",
            Type="Number",
            Default=int(datetime.timedelta(minutes=5).total_seconds()),
        ))
    enable_price_class_hack = template.add_parameter(
        Parameter(
            "EnablePriceClassHack",
            Description="Cut your bill in half with this one weird trick.",
            Type="String",
            Default="false",
            AllowedValues=["true", "false"],
        ))
    # --- Conditions derived from the parameters above ---
    retention_defined = add_condition(template, "RetentionDefined",
                                      Not(Equals(Ref(log_retention_days), 0)))
    using_price_class_hack = add_condition(
        template, "UsingPriceClassHack",
        Equals(Ref(enable_price_class_hack), "true"))
    using_acm_certificate = add_condition(
        template, "UsingAcmCertificate",
        Not(Equals(Ref(acm_certificate_arn), "")))
    using_hosted_zone = add_condition(template, "UsingHostedZone",
                                      Not(Equals(Ref(hosted_zone_id), "")))
    using_certificate = add_condition(
        template,
        "UsingCertificate",
        Or(Condition(using_acm_certificate), Condition(using_hosted_zone)),
    )
    should_create_certificate = add_condition(
        template,
        "ShouldCreateCertificate",
        And(Condition(using_hosted_zone),
            Not(Condition(using_acm_certificate))),
    )
    using_dns_names = add_condition(template, "UsingDnsNames",
                                    Not(Equals(Select(0, Ref(dns_names)), "")))
    is_primary_region = "IsPrimaryRegion"
    template.add_condition(
        is_primary_region,
        Equals(Region, FindInMap(partition_config, Partition,
                                 "PrimaryRegion")),
    )
    # WaitConditionHandle used purely as a deploy-time precondition: it only
    # exists (and thus the stack only deploys) in the partition's primary region.
    precondition_region_is_primary = template.add_resource(
        WaitConditionHandle(
            "PreconditionIsPrimaryRegionForPartition",
            Condition=is_primary_region,
        ))
    # --- Log ingestion Lambda + DLQ + role ---
    log_ingester_dlq = template.add_resource(
        Queue(
            "LogIngesterDLQ",
            MessageRetentionPeriod=int(
                datetime.timedelta(days=14).total_seconds()),
            KmsMasterKeyId="alias/aws/sqs",
        ))
    log_ingester_role = template.add_resource(
        Role(
            "LogIngesterRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect="Allow",
                        Principal=Principal("Service", "lambda.amazonaws.com"),
                        Action=[sts.AssumeRole],
                    )
                ],
            ),
            Policies=[
                PolicyProperty(
                    PolicyName="DLQPolicy",
                    PolicyDocument=PolicyDocument(
                        Version="2012-10-17",
                        Statement=[
                            Statement(
                                Effect=Allow,
                                Action=[sqs.SendMessage],
                                Resource=[GetAtt(log_ingester_dlq, "Arn")],
                            )
                        ],
                    ),
                )
            ],
        ))
    log_ingester = template.add_resource(
        Function(
            "LogIngester",
            Runtime=PYTHON_RUNTIME,
            Handler="index.{}".format(log_ingest.handler.__name__),
            Code=Code(ZipFile=inspect.getsource(log_ingest)),
            MemorySize=256,
            Timeout=300,
            Role=GetAtt(log_ingester_role, "Arn"),
            DeadLetterConfig=DeadLetterConfig(
                TargetArn=GetAtt(log_ingester_dlq, "Arn")),
        ))
    log_ingester_permission = template.add_resource(
        Permission(
            "LogIngesterPermission",
            FunctionName=GetAtt(log_ingester, "Arn"),
            Action="lambda:InvokeFunction",
            Principal="s3.amazonaws.com",
            SourceAccount=AccountId,
        ))
    # --- Log bucket: receives CloudFront/S3 access logs, triggers ingester ---
    log_bucket = template.add_resource(
        Bucket(
            "LogBucket",
            # S3 requires this ACL (regardless of bucket policy) or s3:PutBucketLogging fails.
            # When the CloudFront distribution is created, it adds an additional bucket ACL.
            # That ACL is not possible to model in CloudFormation.
            AccessControl="LogDeliveryWrite",
            LifecycleConfiguration=LifecycleConfiguration(Rules=[
                LifecycleRule(ExpirationInDays=1, Status="Enabled"),
                LifecycleRule(
                    AbortIncompleteMultipartUpload=
                    AbortIncompleteMultipartUpload(DaysAfterInitiation=1),
                    Status="Enabled",
                ),
            ]),
            NotificationConfiguration=NotificationConfiguration(
                LambdaConfigurations=[
                    LambdaConfigurations(Event="s3:ObjectCreated:*",
                                         Function=GetAtt(log_ingester, "Arn"))
                ]),
            BucketEncryption=BucketEncryption(
                ServerSideEncryptionConfiguration=[
                    ServerSideEncryptionRule(
                        ServerSideEncryptionByDefault=
                        ServerSideEncryptionByDefault(
                            # if we use KMS, we can't read the logs
                            SSEAlgorithm="AES256"))
                ]),
            OwnershipControls=OwnershipControls(Rules=[
                OwnershipControlsRule(ObjectOwnership="BucketOwnerPreferred")
            ], ),
            PublicAccessBlockConfiguration=PublicAccessBlockConfiguration(
                BlockPublicAcls=True,
                BlockPublicPolicy=True,
                IgnorePublicAcls=True,
                RestrictPublicBuckets=True,
            ),
            DependsOn=[log_ingester_permission],
        ))
    log_ingester_log_group = template.add_resource(
        LogGroup(
            "LogIngesterLogGroup",
            LogGroupName=Join("", ["/aws/lambda/", Ref(log_ingester)]),
            RetentionInDays=If(retention_defined, Ref(log_retention_days),
                               NoValue),
        ))
    log_ingester_policy = template.add_resource(
        PolicyType(
            "LogIngesterPolicy",
            Roles=[Ref(log_ingester_role)],
            PolicyName="IngestLogPolicy",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[logs.CreateLogStream, logs.PutLogEvents],
                        Resource=[
                            Join(
                                ":",
                                [
                                    "arn",
                                    Partition,
                                    "logs",
                                    Region,
                                    AccountId,
                                    "log-group",
                                    "/aws/cloudfront/*",
                                ],
                            ),
                            Join(
                                ":",
                                [
                                    "arn",
                                    Partition,
                                    "logs",
                                    Region,
                                    AccountId,
                                    "log-group",
                                    "/aws/s3/*",
                                ],
                            ),
                            GetAtt(log_ingester_log_group, "Arn"),
                        ],
                    ),
                    Statement(
                        Effect=Allow,
                        Action=[s3.GetObject],
                        Resource=[Join("", [GetAtt(log_bucket, "Arn"), "/*"])],
                    ),
                ],
            ),
        ))
    # --- Content bucket served by CloudFront ---
    bucket = template.add_resource(
        Bucket(
            "ContentBucket",
            LifecycleConfiguration=LifecycleConfiguration(Rules=[
                # not supported by CFN yet:
                # LifecycleRule(
                #     Transitions=[
                #         LifecycleRuleTransition(
                #             StorageClass='INTELLIGENT_TIERING',
                #             TransitionInDays=1,
                #         ),
                #     ],
                #     Status="Enabled",
                # ),
                LifecycleRule(
                    AbortIncompleteMultipartUpload=
                    AbortIncompleteMultipartUpload(DaysAfterInitiation=7),
                    Status="Enabled",
                )
            ]),
            LoggingConfiguration=LoggingConfiguration(
                DestinationBucketName=Ref(log_bucket), LogFilePrefix="s3/"),
            BucketEncryption=BucketEncryption(
                ServerSideEncryptionConfiguration=[
                    ServerSideEncryptionRule(
                        ServerSideEncryptionByDefault=
                        ServerSideEncryptionByDefault(
                            # Origin Access Identities can't use KMS
                            SSEAlgorithm="AES256"))
                ]),
            OwnershipControls=OwnershipControls(Rules=[
                OwnershipControlsRule(ObjectOwnership="BucketOwnerPreferred")
            ], ),
            PublicAccessBlockConfiguration=PublicAccessBlockConfiguration(
                BlockPublicAcls=True,
                BlockPublicPolicy=True,
                IgnorePublicAcls=True,
                RestrictPublicBuckets=True,
            ),
        ))
    origin_access_identity = template.add_resource(
        CloudFrontOriginAccessIdentity(
            "CloudFrontIdentity",
            CloudFrontOriginAccessIdentityConfig=
            CloudFrontOriginAccessIdentityConfig(
                Comment=GetAtt(bucket, "Arn")),
        ))
    bucket_policy = template.add_resource(
        BucketPolicy(
            "ContentBucketPolicy",
            Bucket=Ref(bucket),
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Principal=Principal(
                            "CanonicalUser",
                            GetAtt(origin_access_identity,
                                   "S3CanonicalUserId"),
                        ),
                        Action=[s3.GetObject],
                        Resource=[Join("", [GetAtt(bucket, "Arn"), "/*"])],
                    ),
                ],
            ),
        ))
    # Not strictly necessary, as ACLs should take care of this access. However, CloudFront docs
    # state "In some circumstances [...] S3 resets permissions on the bucket to the default value",
    # and this allows logging to work without any ACLs in place.
    log_bucket_policy = template.add_resource(
        BucketPolicy(
            "LogBucketPolicy",
            Bucket=Ref(log_bucket),
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Principal=Principal("Service",
                                            "delivery.logs.amazonaws.com"),
                        Action=[s3.PutObject],
                        Resource=[
                            Join("/",
                                 [GetAtt(log_bucket, "Arn"), "cloudfront", "*"])
                        ],
                    ),
                    Statement(
                        Effect=Allow,
                        Principal=Principal("Service",
                                            "delivery.logs.amazonaws.com"),
                        Action=[s3.ListBucket],
                        Resource=[Join("/", [GetAtt(log_bucket, "Arn")])],
                    ),
                    Statement(
                        Effect=Allow,
                        Principal=Principal("Service", "s3.amazonaws.com"),
                        Action=[s3.PutObject],
                        Resource=[
                            Join("/", [GetAtt(log_bucket, "Arn"), "s3", "*"])
                        ],
                    ),
                ],
            ),
        ))
    # --- Optional ACM certificate creation + DNS validation Lambda ---
    certificate_validator_dlq = template.add_resource(
        Queue(
            "CertificateValidatorDLQ",
            MessageRetentionPeriod=int(
                datetime.timedelta(days=14).total_seconds()),
            KmsMasterKeyId="alias/aws/sqs",
            Condition=should_create_certificate,
        ))
    certificate_validator_role = template.add_resource(
        Role(
            "CertificateValidatorRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect="Allow",
                        Principal=Principal("Service", "lambda.amazonaws.com"),
                        Action=[sts.AssumeRole],
                    )
                ],
            ),
            Policies=[
                PolicyProperty(
                    PolicyName="DLQPolicy",
                    PolicyDocument=PolicyDocument(
                        Version="2012-10-17",
                        Statement=[
                            Statement(
                                Effect=Allow,
                                Action=[sqs.SendMessage],
                                Resource=[
                                    GetAtt(certificate_validator_dlq, "Arn")
                                ],
                            )
                        ],
                    ),
                )
            ],
            # TODO scope down
            ManagedPolicyArns=[
                "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole",
                "arn:aws:iam::aws:policy/AmazonRoute53FullAccess",
                "arn:aws:iam::aws:policy/AWSCertificateManagerReadOnly",
            ],
            Condition=should_create_certificate,
        ))
    certificate_validator_function = template.add_resource(
        Function(
            "CertificateValidatorFunction",
            Runtime=PYTHON_RUNTIME,
            Handler="index.{}".format(certificate_validator.handler.__name__),
            Code=Code(ZipFile=inspect.getsource(certificate_validator)),
            MemorySize=256,
            Timeout=300,
            Role=GetAtt(certificate_validator_role, "Arn"),
            DeadLetterConfig=DeadLetterConfig(
                TargetArn=GetAtt(certificate_validator_dlq, "Arn")),
            Environment=Environment(
                Variables={
                    certificate_validator.EnvVars.HOSTED_ZONE_ID.name:
                    Ref(hosted_zone_id)
                }),
            Condition=should_create_certificate,
        ))
    certificate_validator_log_group = template.add_resource(
        LogGroup(
            "CertificateValidatorLogGroup",
            LogGroupName=Join(
                "", ["/aws/lambda/",
                     Ref(certificate_validator_function)]),
            RetentionInDays=If(retention_defined, Ref(log_retention_days),
                               NoValue),
            Condition=should_create_certificate,
        ))
    # Trigger the validator Lambda when ACM tags the new certificate.
    certificate_validator_rule = template.add_resource(
        Rule(
            "CertificateValidatorRule",
            EventPattern={
                "detail-type": ["AWS API Call via CloudTrail"],
                "detail": {
                    "eventSource": ["acm.amazonaws.com"],
                    "eventName": ["AddTagsToCertificate"],
                    "requestParameters": {
                        "tags": {
                            "key": [certificate_validator_function.title],
                            "value":
                            [GetAtt(certificate_validator_function, "Arn")],
                        }
                    },
                },
            },
            Targets=[
                Target(
                    Id="certificate-validator-lambda",
                    Arn=GetAtt(certificate_validator_function, "Arn"),
                )
            ],
            DependsOn=[certificate_validator_log_group],
            Condition=should_create_certificate,
        ))
    certificate_validator_permission = template.add_resource(
        Permission(
            "CertificateValidatorPermission",
            FunctionName=GetAtt(certificate_validator_function, "Arn"),
            Action="lambda:InvokeFunction",
            Principal="events.amazonaws.com",
            SourceArn=GetAtt(certificate_validator_rule, "Arn"),
            Condition=should_create_certificate,
        ))
    certificate = template.add_resource(
        Certificate(
            "Certificate",
            DomainName=Select(0, Ref(dns_names)),
            SubjectAlternativeNames=Ref(
                dns_names),  # duplicate first name works fine
            ValidationMethod="DNS",
            Tags=Tags(
                **{
                    certificate_validator_function.title:
                    GetAtt(certificate_validator_function, "Arn")
                }),
            DependsOn=[certificate_validator_permission],
            Condition=should_create_certificate,
        ))
    # --- Lambda@Edge origin-request hook ---
    edge_hook_role = template.add_resource(
        Role(
            "EdgeHookRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect="Allow",
                        Principal=Principal(
                            "Service",
                            [
                                "lambda.amazonaws.com",
                                "edgelambda.amazonaws.com"
                            ],
                        ),
                        Action=[sts.AssumeRole],
                    )
                ],
            ),
        ))
    edge_hook_function = template.add_resource(
        Function(
            "EdgeHookFunction",
            Runtime=PYTHON_RUNTIME,
            Handler="index.handler",
            Code=Code(ZipFile=inspect.getsource(edge_hook)),
            MemorySize=128,
            Timeout=3,
            Role=GetAtt(edge_hook_role, "Arn"),
        ))
    # Hash of the function definition, used to force a new Version resource
    # (and thus a new Lambda version) whenever the function changes.
    edge_hook_function_hash = (hashlib.sha256(
        json.dumps(edge_hook_function.to_dict(),
                   sort_keys=True).encode("utf-8")).hexdigest()[:10].upper())
    edge_hook_version = template.add_resource(
        Version(
            "EdgeHookVersion" + edge_hook_function_hash,
            FunctionName=GetAtt(edge_hook_function, "Arn"),
        ))
    replica_log_group_name = Join(
        "/",
        [
            "/aws/lambda",
            Join(
                ".",
                [
                    FindInMap(partition_config, Partition, "PrimaryRegion"),
                    Ref(edge_hook_function),
                ],
            ),
        ],
    )
    edge_hook_role_policy = template.add_resource(
        PolicyType(
            "EdgeHookRolePolicy",
            PolicyName="write-logs",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[logs.CreateLogStream, logs.PutLogEvents],
                        Resource=[
                            Join(
                                ":",
                                [
                                    "arn",
                                    Partition,
                                    "logs",
                                    "*",
                                    AccountId,
                                    "log-group",
                                    replica_log_group_name,
                                    "log-stream",
                                    "*",
                                ],
                            ),
                        ],
                    ),
                ],
            ),
            Roles=[Ref(edge_hook_role)],
        ))
    # --- StackSet roles for creating per-region Lambda@Edge log groups ---
    stack_set_administration_role = template.add_resource(
        Role(
            "StackSetAdministrationRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Principal=Principal("Service",
                                            "cloudformation.amazonaws.com"),
                        Action=[sts.AssumeRole],
                    ),
                ],
            ),
        ))
    stack_set_execution_role = template.add_resource(
        Role(
            "StackSetExecutionRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Principal=Principal(
                            "AWS",
                            GetAtt(stack_set_administration_role, "Arn")),
                        Action=[sts.AssumeRole],
                    ),
                ],
            ),
            Policies=[
                PolicyProperty(
                    PolicyName="create-stackset-instances",
                    PolicyDocument=PolicyDocument(
                        Version="2012-10-17",
                        Statement=[
                            Statement(
                                Effect=Allow,
                                Action=[
                                    cloudformation.DescribeStacks,
                                    logs.DescribeLogGroups,
                                ],
                                Resource=["*"],
                            ),
                            # stack instances communicate with the CFN service via SNS
                            Statement(
                                Effect=Allow,
                                Action=[sns.Publish],
                                NotResource=[
                                    Join(
                                        ":",
                                        [
                                            "arn", Partition, "sns", "*",
                                            AccountId, "*"
                                        ],
                                    )
                                ],
                            ),
                            Statement(
                                Effect=Allow,
                                Action=[
                                    logs.CreateLogGroup,
                                    logs.DeleteLogGroup,
                                    logs.PutRetentionPolicy,
                                    logs.DeleteRetentionPolicy,
                                ],
                                Resource=[
                                    Join(
                                        ":",
                                        [
                                            "arn",
                                            Partition,
                                            "logs",
                                            "*",
                                            AccountId,
                                            "log-group",
                                            replica_log_group_name,
                                            "log-stream",
                                            "",
                                        ],
                                    ),
                                ],
                            ),
                            Statement(
                                Effect=Allow,
                                Action=[
                                    cloudformation.CreateStack,
                                    cloudformation.DeleteStack,
                                    cloudformation.UpdateStack,
                                ],
                                Resource=[
                                    Join(
                                        ":",
                                        [
                                            "arn",
                                            Partition,
                                            "cloudformation",
                                            "*",
                                            AccountId,
                                            Join(
                                                "/",
                                                [
                                                    "stack",
                                                    Join(
                                                        "-",
                                                        [
                                                            "StackSet",
                                                            StackName, "*"
                                                        ],
                                                    ),
                                                ],
                                            ),
                                        ],
                                    )
                                ],
                            ),
                        ],
                    ),
                ),
            ],
        ))
    stack_set_administration_role_policy = template.add_resource(
        PolicyType(
            "StackSetAdministrationRolePolicy",
            PolicyName="assume-execution-role",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[sts.AssumeRole],
                        Resource=[GetAtt(stack_set_execution_role, "Arn")],
                    ),
                ],
            ),
            Roles=[Ref(stack_set_administration_role)],
        ))
    edge_log_groups = template.add_resource(
        StackSet(
            "EdgeLambdaLogGroupStackSet",
            AdministrationRoleARN=GetAtt(stack_set_administration_role,
                                         "Arn"),
            ExecutionRoleName=Ref(stack_set_execution_role),
            StackSetName=Join("-", [StackName, "EdgeLambdaLogGroup"]),
            PermissionModel="SELF_MANAGED",
            Description="Multi-region log groups for Lambda@Edge replicas",
            Parameters=[
                StackSetParameter(
                    ParameterKey="LogGroupName",
                    ParameterValue=replica_log_group_name,
                ),
                StackSetParameter(
                    ParameterKey="LogRetentionDays",
                    ParameterValue=Ref(log_retention_days),
                ),
            ],
            OperationPreferences=OperationPreferences(
                FailureToleranceCount=0,
                MaxConcurrentPercentage=100,
            ),
            StackInstancesGroup=[
                StackInstances(
                    DeploymentTargets=DeploymentTargets(
                        Accounts=[AccountId]),
                    Regions=FindInMap(partition_config, Partition,
                                      "DefaultRegions"),
                )
            ],
            TemplateBody=create_log_group_template().to_json(indent=None),
            DependsOn=[stack_set_administration_role_policy],
        ))
    # --- CloudFront distributions ---
    price_class_distribution = template.add_resource(
        Distribution(
            "PriceClassDistribution",
            DistributionConfig=DistributionConfig(
                Comment="Dummy distribution used for price class hack",
                DefaultCacheBehavior=DefaultCacheBehavior(
                    TargetOriginId="default",
                    ViewerProtocolPolicy="allow-all",
                    ForwardedValues=ForwardedValues(QueryString=False),
                ),
                Enabled=True,
                Origins=[
                    Origin(Id="default",
                           DomainName=GetAtt(bucket, "DomainName"))
                ],
                IPV6Enabled=True,
                ViewerCertificate=ViewerCertificate(
                    CloudFrontDefaultCertificate=True),
                PriceClass="PriceClass_All",
            ),
            Condition=using_price_class_hack,
        ))
    distribution = template.add_resource(
        Distribution(
            "ContentDistribution",
            DistributionConfig=DistributionConfig(
                Enabled=True,
                Aliases=If(using_dns_names, Ref(dns_names), NoValue),
                Logging=Logging(Bucket=GetAtt(log_bucket, "DomainName"),
                                Prefix="cloudfront/"),
                DefaultRootObject="index.html",
                Origins=[
                    Origin(
                        Id="default",
                        DomainName=GetAtt(bucket, "DomainName"),
                        S3OriginConfig=S3OriginConfig(
                            OriginAccessIdentity=Join(
                                "",
                                [
                                    "origin-access-identity/cloudfront/",
                                    Ref(origin_access_identity),
                                ],
                            )),
                    )
                ],
                DefaultCacheBehavior=DefaultCacheBehavior(
                    TargetOriginId="default",
                    Compress=True,
                    ForwardedValues=ForwardedValues(QueryString=False),
                    ViewerProtocolPolicy="redirect-to-https",
                    DefaultTTL=Ref(default_ttl_seconds),
                    LambdaFunctionAssociations=[
                        LambdaFunctionAssociation(
                            EventType="origin-request",
                            LambdaFunctionARN=Ref(edge_hook_version),
                        )
                    ],
                ),
                HttpVersion="http2",
                IPV6Enabled=True,
                ViewerCertificate=ViewerCertificate(
                    AcmCertificateArn=If(
                        using_acm_certificate,
                        Ref(acm_certificate_arn),
                        If(using_hosted_zone, Ref(certificate), NoValue),
                    ),
                    SslSupportMethod=If(using_certificate, "sni-only",
                                        NoValue),
                    CloudFrontDefaultCertificate=If(using_certificate,
                                                    NoValue, True),
                    MinimumProtocolVersion=Ref(tls_protocol_version),
                ),
                PriceClass=If(using_price_class_hack, "PriceClass_100",
                              "PriceClass_All"),
            ),
            DependsOn=[
                bucket_policy,
                log_ingester_policy,
                edge_log_groups,
                precondition_region_is_primary,
            ],
        ))
    distribution_log_group = template.add_resource(
        LogGroup(
            "DistributionLogGroup",
            LogGroupName=Join("", ["/aws/cloudfront/", Ref(distribution)]),
            RetentionInDays=If(retention_defined, Ref(log_retention_days),
                               NoValue),
        ))
    bucket_log_group = template.add_resource(
        LogGroup(
            "BucketLogGroup",
            LogGroupName=Join("", ["/aws/s3/", Ref(bucket)]),
            RetentionInDays=If(retention_defined, Ref(log_retention_days),
                               NoValue),
        ))
    # --- Outputs ---
    template.add_output(Output("DistributionId", Value=Ref(distribution)))
    template.add_output(
        Output("DistributionDomain",
               Value=GetAtt(distribution, "DomainName")))
    template.add_output(
        Output(
            "DistributionDnsTarget",
            Value=If(
                using_price_class_hack,
                GetAtt(price_class_distribution, "DomainName"),
                GetAtt(distribution, "DomainName"),
            ),
        ))
    template.add_output(
        Output(
            "DistributionUrl",
            Value=Join("",
                       ["https://",
                        GetAtt(distribution, "DomainName"), "/"]),
        ))
    template.add_output(Output("ContentBucketArn",
                               Value=GetAtt(bucket, "Arn")))
    return template
def add_rule(self):
    """Attach a WaitConditionHandle resource and remember it as the rule."""
    handle = WaitConditionHandle("WaitConditionHandle")
    self.rule = self.template.add_resource(handle)
def main():
    """Build and print (as YAML) a CloudFormation template for a DCV 2017
    remote desktop with Xilinx Vivado, based on the AWS FPGA Developer AMI.

    Reads the instance bootstrap script from ``_include/dcv-install.sh`` and
    embeds it into the instance UserData. The finished template is printed to
    stdout via ``to_yaml``; nothing is returned.
    """
    t = Template()
    AddAMIMap(t)  # helper defined elsewhere in this project; presumably adds the "AWSRegionAMI" mapping used below — TODO confirm
    t.set_version("2010-09-09")
    t.set_description(
        "DCV 2017 Remote Desktop with Xilinx Vivado (using AWS FPGA Developer AMI)"
    )
    # NOTE(review): `tags` is built here but never attached to any resource below.
    tags = Tags(Name=Ref("AWS::StackName"))
    # user data: shell preamble with stack parameters exported as shell vars,
    # followed by the contents of the install script.
    InstUserData = list()
    InstUserData = [
        '#!/usr/bin/env bash\n',
        '\n',
        'set -x\n',
        '\n',
        '##exit 0\n',  # use this to disable all user-data and bring up files
        '\n',
        'my_wait_handle="', Ref('InstanceWaitHandle'), '"\n',
        'user_name="', Ref('UserName'), '"\n',
        'user_pass="******"\n',
        '\n',
    ]
    with open('_include/dcv-install.sh', 'r',) as ud_file:
        user_data_file = ud_file.readlines()
    for l in user_data_file:
        InstUserData.append(l)
    # --- Parameters -------------------------------------------------------
    VPCId = t.add_parameter(Parameter(
        'VPCId',
        Type="AWS::EC2::VPC::Id",
        Description="VPC ID for where the remote desktop instance should be launched"
    ))
    t.set_parameter_label(VPCId, "VPC ID")
    t.add_parameter_to_group(VPCId, "Instance Configuration")
    Subnet = t.add_parameter(Parameter(
        'Subnet',
        Type="AWS::EC2::Subnet::Id",
        Description="For the Subnet ID, you should choose one in the "
                    "Availability Zone where you want the instance launched"
    ))
    t.set_parameter_label(Subnet, "Subnet ID")
    t.add_parameter_to_group(Subnet, "Instance Configuration")
    ExistingSecurityGroup = t.add_parameter(Parameter(
        'ExistingSecurityGroup',
        Type="String",
        Default="NO_VALUE",
        Description="OPTIONAL: Needs to be a SG ID, for example sg-abcd1234efgh. "
                    "This is an already existing Security Group ID that is "
                    "in the same VPC, this is an addition to the security groups that "
                    "are automatically created to enable access to the remote desktop,"
                    "leave as NO_VALUE if you choose not to use this"
    ))
    t.set_parameter_label(ExistingSecurityGroup, "OPTIONAL: Existing Security Group (e.g. sg-abcd1234efgh)")
    t.add_parameter_to_group(ExistingSecurityGroup, "Instance Configuration")
    remoteDesktopInstanceType = t.add_parameter(Parameter(
        'remoteDesktopInstanceType',
        Type="String",
        Description="This is the instance type that will be used. As this is a "
                    "2D workstation, we are not supporting GPU instance types.",
        Default="m4.xlarge",
        AllowedValues=[
            "m4.large", "m4.xlarge", "m4.2xlarge", "m4.4xlarge", "m4.10xlarge",
            "m5.large", "m5.xlarge", "m5.2xlarge", "m5.4xlarge", "m5.12xlarge",
            "m5.24xlarge", "z1d.large", "z1d.xlarge", "z1d.2xlarge",
            "z1d.3xlarge", "z1d.6xlarge", "z1d.12xlarge", "z1d.metal"
        ],
        ConstraintDescription="Must an EC2 instance type from the list"
    ))
    t.set_parameter_label(remoteDesktopInstanceType, "Remote Desktop Instance Type")
    t.add_parameter_to_group(remoteDesktopInstanceType, "Instance Configuration")
    EC2KeyName = t.add_parameter(Parameter(
        'EC2KeyName',
        Type="AWS::EC2::KeyPair::KeyName",
        Description="Name of an existing EC2 KeyPair to enable SSH access to the instance.",
        ConstraintDescription="REQUIRED: Must be a valid EC2 key pair"
    ))
    t.set_parameter_label(EC2KeyName, "EC2 Key Name")
    t.add_parameter_to_group(EC2KeyName, "Instance Configuration")
    OperatingSystem = t.add_parameter(Parameter(
        'OperatingSystem',
        Type="String",
        Description="Operating System of the AMI",
        Default="centos7",
        AllowedValues=[
            "centos7"
        ],
        ConstraintDescription="Must be: centos7"
    ))
    t.set_parameter_label(OperatingSystem, "Operating System of AMI")
    t.add_parameter_to_group(OperatingSystem, "Instance Configuration")
    StaticPrivateIpAddress = t.add_parameter(Parameter(
        'StaticPrivateIpAddress',
        Type="String",
        Default="NO_VALUE",
        Description="OPTIONAL: If you already have a private VPC address range, you can "
                    "specify the private IP address to use, leave as NO_VALUE if you choose not to use this",
    ))
    t.set_parameter_label(StaticPrivateIpAddress, "OPTIONAL: Static Private IP Address")
    t.add_parameter_to_group(StaticPrivateIpAddress, "Instance Configuration")
    UsePublicIp = t.add_parameter(Parameter(
        'UsePublicIp',
        Type="String",
        Description="Should a public IP address be given to the instance, "
                    "this is overridden by CreateElasticIP=True",
        Default="True",
        ConstraintDescription="True/False",
        AllowedValues=[
            "True", "False"
        ]
    ))
    t.set_parameter_label(UsePublicIp, "Assign a public IP Address")
    t.add_parameter_to_group(UsePublicIp, "Instance Configuration")
    CreateElasticIP = t.add_parameter(Parameter(
        'CreateElasticIP',
        Type="String",
        Description="Should an Elastic IP address be created and assigned, "
                    "this allows for persistent IP address assignment",
        Default="True",
        ConstraintDescription="True/False",
        AllowedValues=[
            "True", "False"
        ]
    ))
    t.set_parameter_label(CreateElasticIP, "Create an Elastic IP address")
    t.add_parameter_to_group(CreateElasticIP, "Instance Configuration")
    S3BucketName = t.add_parameter(Parameter(
        'S3BucketName',
        Type="String",
        Default="NO_VALUE",
        Description="OPTIONAL: S3 bucket to allow this instance read access (List and Get),"
                    "leave as NO_VALUE if you choose not to use this"
    ))
    t.set_parameter_label(S3BucketName, "OPTIONAL: S3 bucket for read access")
    t.add_parameter_to_group(S3BucketName, "Instance Configuration")
    AccessCidr = t.add_parameter(Parameter(
        'AccessCidr',
        Type="String",
        Description="This is the CIDR block for allowing remote access, for ports 22 and 8443",
        Default="111.222.333.444/32",
        AllowedPattern="(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})",
        ConstraintDescription="Must be a valid CIDR x.x.x.x/x"
    ))
    t.set_parameter_label(AccessCidr, "CIDR block for remote access (ports 22 and 8443)")
    t.add_parameter_to_group(AccessCidr, "Instance Configuration")
    UserName = t.add_parameter(Parameter(
        'UserName',
        Type="String",
        Description="User name for DCV remote desktop login, default is \"simuser\".",
        Default="simuser",
        MinLength="4",
    ))
    t.set_parameter_label(UserName, "User name for DCV login")
    t.add_parameter_to_group(UserName, "DCV Configuration")
    UserPass = t.add_parameter(Parameter(
        'UserPass',
        Type="String",
        Description="Password for DCV remote desktop login. The default password is Ch4ng3M3!",
        Default="Ch4ng3M3!",
        MinLength="8",
        AllowedPattern="^((?=.*[a-z])(?=.*[A-Z])(?=.*[\\d])|(?=.*[a-z])(?=.*[A-Z])(?=.*[\\W_])|(?=.*[a-z])(?=.*[\\d])(?=.*[\\W_])|(?=.*[A-Z])(?=.*[\\d])(?=.*[\\W_])).+$",
        ConstraintDescription="Password must contain at least one element from three of the following sets: lowercase letters, uppercase letters, base 10 digits, non-alphanumeric characters",
        NoEcho=True
    ))
    t.set_parameter_label(UserPass, "Password for DCV login")
    t.add_parameter_to_group(UserPass, "DCV Configuration")
    # end parameters
    # --- IAM role, policies, security groups ------------------------------
    RootRole = t.add_resource(iam.Role(
        "RootRole",
        AssumeRolePolicyDocument={
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Principal": {
                        "Service": ["ec2.amazonaws.com"],
                    },
                    "Action": ["sts:AssumeRole"]
                }]
        }
    ))
    # Grants read access to the regional DCV license bucket.
    # NOTE(review): the trailing comma after `))` makes this assignment a
    # 1-tuple rather than the resource object. Harmless here because the name
    # is never used again, but it looks unintended — TODO confirm.
    dcvBucketPolicy= t.add_resource(PolicyType(
        "dcvBucketPolicy",
        PolicyName="dcvBucketPolicy",
        Roles=[Ref(RootRole)],
        PolicyDocument={
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": ["s3:GetObject"],
                    "Resource": "arn:aws:s3:::dcv-license.us-east-1/*"
                }
            ],
        },
    )),
    # Optional read access (Get/List) to the user-supplied bucket; only
    # attached when the "Has_Bucket" condition holds.
    # NOTE(review): same 1-tuple trailing-comma oddity as above.
    BucketPolicy= t.add_resource(PolicyType(
        "BucketPolicy",
        PolicyName="BucketPolicy",
        Roles=[Ref(RootRole)],
        PolicyDocument={
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": ["s3:GetObject"],
                    "Resource": {"Fn::Join":["", ["arn:aws:s3:::", {"Ref": "S3BucketName"},"/*"]]}
                },
                {
                    "Effect": "Allow",
                    "Action": [ "s3:ListBucket"],
                    "Resource": {"Fn::Join":["", ["arn:aws:s3:::", {"Ref": "S3BucketName"}]]}
                }
            ],
        },
        Condition="Has_Bucket"
    )),
    # DCV traffic (8443) restricted to the caller-supplied CIDR.
    remoteDesktopSecurityGroup = t.add_resource(SecurityGroup(
        "remoteDesktopSecurityGroup",
        VpcId = Ref(VPCId),
        GroupDescription = "Remote Desktop Secuirty group",
        SecurityGroupIngress=[
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="8443",
                ToPort="8443",
                CidrIp=Ref(AccessCidr),
            ),
        ]
    ))
    # SSH traffic (22) restricted to the same CIDR.
    SshSecurityGroup = t.add_resource(SecurityGroup(
        "SshSecurityGroup",
        VpcId = Ref(VPCId),
        GroupDescription = "SSH Secuirty group",
        SecurityGroupIngress=[
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="22",
                ToPort="22",
                CidrIp=Ref(AccessCidr),
            ),
        ]
    ))
    RootInstanceProfile = t.add_resource(InstanceProfile(
        "RootInstanceProfile",
        Roles=[Ref(RootRole)]
    ))
    # --- The remote desktop instance itself -------------------------------
    remoteDesktopInstance = t.add_resource(ec2.Instance(
        'remoteDesktopInstance',
        ImageId=FindInMap("AWSRegionAMI", Ref("AWS::Region"), Ref(OperatingSystem)),
        KeyName=Ref(EC2KeyName),
        InstanceType=(Ref(remoteDesktopInstanceType)),
        DisableApiTermination='false',
        NetworkInterfaces=[
            NetworkInterfaceProperty(
                SubnetId=Ref(Subnet),
                # Append the optional pre-existing SG only when one was given.
                GroupSet=If(
                    "not_existing_sg",
                    [Ref(remoteDesktopSecurityGroup), Ref(SshSecurityGroup)],
                    [Ref(remoteDesktopSecurityGroup), Ref(SshSecurityGroup), Ref(ExistingSecurityGroup)]
                ),
                AssociatePublicIpAddress=Ref(UsePublicIp),
                DeviceIndex='0',
                DeleteOnTermination='true',
                # Use the static private IP only when one was supplied.
                PrivateIpAddress=If(
                    "Has_Static_Private_IP",
                    Ref(StaticPrivateIpAddress),
                    Ref("AWS::NoValue"),
                )
            )
        ],
        IamInstanceProfile=(Ref(RootInstanceProfile)),
        UserData=Base64(Join('', InstUserData)),
    ))
    EIPAddress = t.add_resource(EIP(
        'EIPAddress',
        Domain='vpc',
        InstanceId=Ref(remoteDesktopInstance),
        Condition="create_elastic_ip"
    ))
    # --- Conditions referenced by the resources above ----------------------
    t.add_condition(
        "not_existing_sg",
        Equals(Ref(ExistingSecurityGroup), "NO_VALUE")
    )
    t.add_condition(
        "Has_Public_Ip",
        Equals(Ref(UsePublicIp), "True")
    )
    t.add_condition(
        "Has_Bucket",
        Not(Equals(Ref(S3BucketName), "NO_VALUE"))
    )
    t.add_condition(
        "create_elastic_ip",
        Equals(Ref(CreateElasticIP), "True")
    )
    t.add_condition(
        "Has_Static_Private_IP",
        Not(Equals(Ref(StaticPrivateIpAddress), "NO_VALUE"))
    )
    # Wait handle the bootstrap script signals when setup completes; stack
    # creation blocks for up to an hour on it.
    waithandle = t.add_resource(WaitConditionHandle('InstanceWaitHandle'))
    instanceWaitCondition = t.add_resource(WaitCondition(
        "instanceWaitCondition",
        Handle=Ref(waithandle),
        Timeout="3600",
        DependsOn="remoteDesktopInstance"
    ))
    # --- Outputs -----------------------------------------------------------
    t.add_output([
        Output(
            "DCVConnectionLink",
            Description="Connect to the DCV Remote Desktop with this URL",
            Value=Join("", [
                "https://",
                GetAtt("remoteDesktopInstance", 'PublicIp'),
                ":8443"
            ])
        ),
        Output(
            "DCVUserName",
            Description="Login name for DCV session",
            Value=(Ref(UserName))
        ),
        Output(
            "SSHTunnelCommand",
            Description='Command for setting up SSH tunnel to remote desktop, use "localhost:18443" for DCV client',
            Value=Join("", [
                "ssh -i <file.pem> -L 18443:localhost:8443 -l centos ",
                GetAtt("remoteDesktopInstance", 'PublicIp')
            ])
        ),
    ])
    #print(t.to_json(indent=2))
    print(to_yaml(t.to_json(indent=2), clean_up=True))
}) t.add_mapping( "AMAZONLINUX2015", { "eu-west-1": { "AMI": "ami-d1f482b1" }, "us-east-1": { "AMI": "ami-8fcee4e5" }, "us-west-2": { "AMI": "ami-63b25203" } }) waitHandleAmbari = t.add_resource(WaitConditionHandle("waitHandleAmbari")) waitConditionAmbari = t.add_resource( WaitCondition( "waitConditionAmbari", Handle=Ref(waitHandleAmbari), Timeout="3600", )) ## Functions to generate blockdevicemappings ## count: the number of devices to map ## devicenamebase: "/dev/sd" or "/dev/xvd" ## volumesize: "100" ## volumetype: "gp2" def my_block_device_mappings_root(devicenamebase, volumesize, volumetype):
def main():
    """Build and print a CloudFormation template for a ZFS-backed NFS server.

    Launches a single RHEL/CentOS 7.5 instance with an IAM role, security
    groups for NFS (2049) and SSH (22), and (when VolumeType is "EBS") six
    attached EBS volumes that the bootstrap script assembles into a ZFS pool
    exported over NFS. The bootstrap script is read from
    ``_include/Tropo_build_zfs_export_nfs.sh``. The template JSON is printed
    to stdout; nothing is returned.
    """
    t = Template()
    t.add_version("2010-09-09")
    t.add_description(
        "Currently supporting RHEL/CentOS 7.5. Setup IAM role and security groups, "
        "launch instance, create/attach 10 EBS volumes, install/fix ZFS "
        "(http://download.zfsonlinux.org/epel/zfs-release.el7_5.noarch.rpm), "
        "create zfs RAID6 pool, setup NFS server, export NFS share")
    # User data: shell preamble exporting stack parameters, then the
    # bootstrap script contents.
    InstUserData = list()
    InstUserData = [
        '#!/usr/bin/env bash\n',
        '\n',
        'set -x\n',
        '\n',
        '##exit 0\n',  # use this to disable all user-data and bring up files
        '\n',
        'zfs_pool_name="', Ref('ZfsPool'), '"\n',
        'zfs_mount_point="', Ref('ZfsMountPoint'), '"\n',
        'nfs_cidr_block="', Ref('NFSCidr'), '"\n',
        'nfs_opts="', Ref('NFSOpts'), '"\n',
        'my_wait_handle="', Ref('NFSInstanceWaitHandle'), '"\n',
        '\n',
    ]
    with open(
            '_include/Tropo_build_zfs_export_nfs.sh',
            'r',
    ) as ud_file:
        user_data_file = ud_file.readlines()
    for l in user_data_file:
        InstUserData.append(l)
    # Console presentation: group and label the parameters.
    t.add_metadata({
        'AWS::CloudFormation::Interface': {
            'ParameterGroups': [{
                'Label': {
                    'default': 'Instance Configuration'
                },
                'Parameters': [
                    "OperatingSystem", "VPCId", "Subnet", "UsePublicIp",
                    "CreateElasticIP", "EC2KeyName", "NFSInstanceType",
                    "SshAccessCidr", "ExistingSecurityGroup",
                    "ExistingPlacementGroup", "S3BucketName"
                ]
            }, {
                'Label': {
                    'default': 'Storage Options - Required'
                },
                'Parameters': [
                    "RAIDLevel", "VolumeSize", "VolumeType", "EBSVolumeType",
                    "VolumeIops"
                ]
            }, {
                'Label': {
                    'default': 'ZFS Pool and FS Options - Required'
                },
                'Parameters': ["ZfsPool", "ZfsMountPoint"]
            }, {
                'Label': {
                    'default': 'NFS Options - Required'
                },
                'Parameters': ["NFSCidr", "NFSOpts"]
            }],
            'ParameterLabels': {
                'OperatingSystem': {
                    'default': 'Operating System of AMI'
                },
                'VPCId': {
                    'default': 'VPC ID'
                },
                'Subnet': {
                    'default': 'Subnet ID'
                },
                'UsePublicIp': {
                    'default': 'Assign a Public IP '
                },
                'CreateElasticIP': {
                    'default': 'Create and use an EIP '
                },
                'EC2KeyName': {
                    'default': 'EC2 Key Name'
                },
                'NFSInstanceType': {
                    'default': 'Instance Type'
                },
                'SshAccessCidr': {
                    'default': 'SSH Access CIDR Block'
                },
                'ExistingSecurityGroup': {
                    'default': 'OPTIONAL: Existing Security Group'
                },
                'ExistingPlacementGroup': {
                    'default': 'OPTIONAL: Existing Placement Group'
                },
                'S3BucketName': {
                    'default': 'Optional S3 Bucket Name'
                },
                'RAIDLevel': {
                    'default': 'RAID Level'
                },
                'VolumeSize': {
                    'default': 'Volume size of the EBS vol'
                },
                'VolumeType': {
                    'default': 'Volume type of the EBS vol'
                },
                'EBSVolumeType': {
                    'default': 'Volume type of the EBS vol'
                },
                'VolumeIops': {
                    'default': 'IOPS for each EBS vol (only for io1)'
                },
                'ZfsPool': {
                    'default': 'ZFS pool name'
                },
                'ZfsMountPoint': {
                    'default': 'Mount Point'
                },
                'NFSCidr': {
                    'default': 'NFS CIDR block for mounts'
                },
                'NFSOpts': {
                    'default': 'NFS options'
                },
            }
        }
    })
    # --- Parameters --------------------------------------------------------
    EC2KeyName = t.add_parameter(
        Parameter(
            'EC2KeyName',
            Type="AWS::EC2::KeyPair::KeyName",
            Description=
            "Name of an existing EC2 KeyPair to enable SSH access to the instance.",
            ConstraintDescription="REQUIRED: Must be a valud EC2 key pair"))
    OperatingSystem = t.add_parameter(
        Parameter('OperatingSystem',
                  Type="String",
                  Description="Operating System",
                  Default="centos7",
                  AllowedValues=[
                      "alinux2",
                      "centos7",
                      "rhel7",
                  ],
                  ConstraintDescription="Must be: alinux2, centos7, rhel7"))
    NFSInstanceType = t.add_parameter(
        Parameter(
            'NFSInstanceType',
            Type="String",
            Description="NFS instance type",
            Default="r4.16xlarge",
            AllowedValues=[
                "m4.16xlarge", "m4.10xlarge", "r4.16xlarge", "c8.8xlarge"
            ],
            ConstraintDescription="Must an EC2 instance type from the list"))
    VolumeType = t.add_parameter(
        Parameter(
            'VolumeType',
            Type="String",
            Description="Type of EBS volume",
            Default="EBS",
            AllowedValues=["EBS", "InstanceStore"],
            ConstraintDescription="Volume type has to EBS or InstanceStore"))
    EBSVolumeType = t.add_parameter(
        Parameter('EBSVolumeType',
                  Description="Type of EBS volumes to create",
                  Type="String",
                  Default="io1",
                  ConstraintDescription="Must be a either: io1, gp2, st1",
                  AllowedValues=["io1", "gp2", "st1"]))
    # NOTE(review): variable name "VolumelSize" (lowercase L) does not match
    # the parameter's logical name "VolumeSize" — looks like a typo, but it is
    # used consistently below so behavior is unaffected.
    VolumelSize = t.add_parameter(
        Parameter('VolumeSize',
                  Type="Number",
                  Default="500",
                  Description="Volume size in GB"))
    VolumeIops = t.add_parameter(
        Parameter('VolumeIops',
                  Type="Number",
                  Default="20000",
                  Description="IOPS for the EBS volume"))
    RAIDLevel = t.add_parameter(
        Parameter(
            'RAIDLevel',
            Description="RAID Level, currently only 6 (8+2p) is supported",
            Type="String",
            Default="0",
            AllowedValues=["0"],
            ConstraintDescription="Must be 0"))
    ZfsPool = t.add_parameter(
        Parameter('ZfsPool',
                  Description="ZFS pool name",
                  Type="String",
                  Default="v01"))
    ZfsMountPoint = t.add_parameter(
        Parameter(
            'ZfsMountPoint',
            Description=
            "ZFS mount point, absolute path will be /pool_name/mount_point (e.g. /v01/testzfs)",
            Type="String",
            Default="testzfs"))
    VPCId = t.add_parameter(
        Parameter('VPCId',
                  Type="AWS::EC2::VPC::Id",
                  Description="VPC Id for this instance"))
    ExistingPlacementGroup = t.add_parameter(
        Parameter('ExistingPlacementGroup',
                  Type="String",
                  Description="OPTIONAL: Existing placement group"))
    Subnet = t.add_parameter(
        Parameter('Subnet',
                  Type="AWS::EC2::Subnet::Id",
                  Description="Subnet IDs"))
    ExistingSecurityGroup = t.add_parameter(
        Parameter(
            'ExistingSecurityGroup',
            Type="AWS::EC2::SecurityGroup::Id",
            Description=
            "OPTIONAL: Choose an existing Security Group ID, e.g. sg-abcd1234")
    )
    UsePublicIp = t.add_parameter(
        Parameter(
            'UsePublicIp',
            Type="String",
            Description="Should a public IP address be given to the instance",
            Default="true",
            ConstraintDescription="true/talse",
            AllowedValues=["true", "false"]))
    CreateElasticIP = t.add_parameter(
        Parameter(
            'CreateElasticIP',
            Type="String",
            Description=
            "Create an Elasic IP address, that will be assinged to an instance",
            Default="true",
            ConstraintDescription="true/false",
            AllowedValues=["true", "false"]))
    S3BucketName = t.add_parameter(
        Parameter('S3BucketName',
                  Type="String",
                  Description="S3 bucket to allow this instance read access."))
    SshAccessCidr = t.add_parameter(
        Parameter(
            'SshAccessCidr',
            Type="String",
            Description="CIDR Block for SSH access, default 0.0.0.0/0",
            Default="0.0.0.0/0",
            AllowedPattern=
            "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})",
            ConstraintDescription="Must be a valid CIDR x.x.x.x/x"))
    NFSCidr = t.add_parameter(
        Parameter(
            'NFSCidr',
            Type="String",
            Description=
            "CIDR for NFS Security Group and NFS clients, to allow all access use 0.0.0.0/0",
            Default="10.0.0.0/16",
            AllowedPattern=
            "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})",
            ConstraintDescription="Must be a valid CIDR x.x.x.x/x"))
    NFSOpts = t.add_parameter(
        Parameter(
            'NFSOpts',
            Description="NFS export options",
            Type="String",
            Default="(rw,async,no_root_squash,wdelay,no_subtree_check,no_acl)")
    )
    # NOTE(review): declared but not referenced by any resource below.
    VarLogMessagesFile = t.add_parameter(
        Parameter(
            'VarLogMessagesFile',
            Type="String",
            Description=
            "S3 bucket and file name for log CloudWatch config (e.g. s3://jouser-logs/var-log-message.config)"
        ))
    # --- IAM role with inline S3 read policy -------------------------------
    RootRole = t.add_resource(
        iam.Role("RootRole",
                 AssumeRolePolicyDocument={
                     "Statement": [{
                         "Effect": "Allow",
                         "Principal": {
                             "Service": ["ec2.amazonaws.com"]
                         },
                         "Action": ["sts:AssumeRole"]
                     }]
                 },
                 Policies=[
                     iam.Policy(PolicyName="s3bucketaccess",
                                PolicyDocument={
                                    "Statement": [{
                                        "Effect": "Allow",
                                        "Action": ["s3:GetObject"],
                                        "Resource": {
                                            "Fn::Join": [
                                                "",
                                                [
                                                    "arn:aws:s3:::", {
                                                        "Ref": "S3BucketName"
                                                    }, "/*"
                                                ]
                                            ]
                                        }
                                    }, {
                                        "Effect": "Allow",
                                        "Action": ["s3:ListBucket"],
                                        "Resource": {
                                            "Fn::Join": [
                                                "",
                                                [
                                                    "arn:aws:s3:::", {
                                                        "Ref": "S3BucketName"
                                                    }
                                                ]
                                            ]
                                        }
                                    }],
                                }),
                 ]))
    # --- Security groups ----------------------------------------------------
    NFSSecurityGroup = t.add_resource(
        SecurityGroup("NFSSecurityGroup",
                      VpcId=Ref(VPCId),
                      GroupDescription="NFS Secuirty group",
                      SecurityGroupIngress=[
                          ec2.SecurityGroupRule(
                              IpProtocol="tcp",
                              FromPort="2049",
                              ToPort="2049",
                              CidrIp=Ref(NFSCidr),
                          ),
                      ]))
    SshSecurityGroup = t.add_resource(
        SecurityGroup("SshSecurityGroup",
                      VpcId=Ref(VPCId),
                      GroupDescription="SSH Secuirty group",
                      SecurityGroupIngress=[
                          ec2.SecurityGroupRule(
                              IpProtocol="tcp",
                              FromPort="22",
                              ToPort="22",
                              CidrIp=Ref(SshAccessCidr),
                          ),
                      ]))
    RootInstanceProfile = t.add_resource(
        InstanceProfile("RootInstanceProfile", Roles=[Ref(RootRole)]))
    EIPAddress = t.add_resource(
        EIP('EIPAddress', Domain='vpc', Condition="create_elastic_ip"))
    # NOTE(review): `tags` is built but never attached to any resource.
    tags = Tags(Name=Ref("AWS::StackName"))
    # --- The NFS server instance -------------------------------------------
    NFSInstance = t.add_resource(
        ec2.Instance(
            'NFSInstance',
            ImageId=FindInMap("AWSRegionAMI", Ref("AWS::Region"),
                              Ref(OperatingSystem)),
            KeyName=Ref(EC2KeyName),
            InstanceType=(Ref(NFSInstanceType)),
            NetworkInterfaces=[
                NetworkInterfaceProperty(
                    # Append the optional pre-existing SG only when one was given.
                    GroupSet=If("not_existing_sg",
                                [Ref(NFSSecurityGroup),
                                 Ref(SshSecurityGroup)], [
                                     Ref(NFSSecurityGroup),
                                     Ref(SshSecurityGroup),
                                     Ref(ExistingSecurityGroup)
                                 ]),
                    AssociatePublicIpAddress=Ref(UsePublicIp),
                    DeviceIndex='0',
                    DeleteOnTermination='true',
                    SubnetId=Ref(Subnet))
            ],
            IamInstanceProfile=(Ref(RootInstanceProfile)),
            PlacementGroupName=(Ref(ExistingPlacementGroup)),
            # Six identical EBS data volumes (sdh..sdm) when VolumeType=EBS,
            # otherwise no mappings (instance-store case).
            # NOTE(review): description mentions 10 volumes but only 6 are
            # mapped here — TODO confirm which is intended.
            BlockDeviceMappings=If(
                'vol_type_ebs',
                [
                    ec2.BlockDeviceMapping(
                        DeviceName="/dev/sdh",
                        Ebs=ec2.EBSBlockDevice(
                            VolumeSize=(Ref(VolumelSize)),
                            DeleteOnTermination="True",
                            Iops=(Ref(VolumeIops)),
                            VolumeType=(Ref(EBSVolumeType)))),
                    ec2.BlockDeviceMapping(
                        DeviceName="/dev/sdi",
                        Ebs=ec2.EBSBlockDevice(
                            VolumeSize=(Ref(VolumelSize)),
                            DeleteOnTermination="True",
                            Iops=(Ref(VolumeIops)),
                            VolumeType=(Ref(EBSVolumeType)))),
                    ec2.BlockDeviceMapping(
                        DeviceName="/dev/sdj",
                        Ebs=ec2.EBSBlockDevice(
                            VolumeSize=(Ref(VolumelSize)),
                            DeleteOnTermination="True",
                            Iops=(Ref(VolumeIops)),
                            VolumeType=(Ref(EBSVolumeType)))),
                    ec2.BlockDeviceMapping(
                        DeviceName="/dev/sdk",
                        Ebs=ec2.EBSBlockDevice(
                            VolumeSize=(Ref(VolumelSize)),
                            DeleteOnTermination="True",
                            Iops=(Ref(VolumeIops)),
                            VolumeType=(Ref(EBSVolumeType)))),
                    ec2.BlockDeviceMapping(
                        DeviceName="/dev/sdl",
                        Ebs=ec2.EBSBlockDevice(
                            VolumeSize=(Ref(VolumelSize)),
                            DeleteOnTermination="True",
                            Iops=(Ref(VolumeIops)),
                            VolumeType=(Ref(EBSVolumeType)))),
                    ec2.BlockDeviceMapping(
                        DeviceName="/dev/sdm",
                        Ebs=ec2.EBSBlockDevice(
                            VolumeSize=(Ref(VolumelSize)),
                            DeleteOnTermination="True",
                            Iops=(Ref(VolumeIops)),
                            VolumeType=(Ref(EBSVolumeType)))),
                ],
                {"Ref": "AWS::NoValue"},
            ),
            UserData=Base64(Join('', InstUserData)),
        ))
    # End of NFSInstance
    # Region/OS -> AMI lookup used by FindInMap above.
    t.add_mapping(
        'AWSRegionAMI', {
            "ap-northeast-1": {
                "centos7": "ami-8e8847f1",
                "rhel7": "ami-6b0d5f0d"
            },
            "ap-northeast-2": {
                "centos7": "ami-bf9c36d1",
                "rhel7": "ami-3eee4150"
            },
            "ap-south-1": {
                "centos7": "ami-1780a878",
                "rhel7": "ami-5b673c34"
            },
            "ap-southeast-1": {
                "centos7": "ami-8e0205f2",
                "rhel7": "ami-76144b0a"
            },
            "ap-southeast-2": {
                "centos7": "ami-d8c21dba",
                "rhel7": "ami-67589505"
            },
            "ca-central-1": {
                "centos7": "ami-e802818c",
                "rhel7": "ami-49f0762d"
            },
            "eu-central-1": {
                "centos7": "ami-dd3c0f36",
                "rhel7": "ami-c86c3f23"
            },
            "eu-west-1": {
                "centos7": "ami-3548444c",
                "rhel7": "ami-7c491f05"
            },
            "eu-west-2": {
                "centos7": "ami-00846a67",
                "rhel7": "ami-7c1bfd1b"
            },
            "eu-west-3": {
                "centos7": "ami-262e9f5b",
                "rhel7": "ami-5026902d"
            },
            "sa-east-1": {
                "centos7": "ami-cb5803a7",
                "rhel7": "ami-b0b7e3dc"
            },
            "us-east-1": {
                "centos7": "ami-9887c6e7",
                "rhel7": "ami-6871a115"
            },
            "us-east-2": {
                "centos7": "ami-9c0638f9",
                "rhel7": "ami-03291866"
            },
            "us-west-1": {
                "centos7": "ami-4826c22b",
                "rhel7": "ami-18726478"
            },
            "us-west-2": {
                "centos7": "ami-3ecc8f46",
                "rhel7": "ami-28e07e50"
            }
        })
    # --- Conditions ---------------------------------------------------------
    t.add_condition("not_existing_sg", Equals(Ref(ExistingSecurityGroup), ""))
    t.add_condition("vol_type_ebs", Equals(Ref(VolumeType), "EBS"))
    # NOTE(review): UsePublicIp's allowed values are lowercase "true"/"false",
    # but this condition compares against "True" — it can never match. TODO
    # confirm; Has_Public_Ip gates only the InstancePublicIP output.
    t.add_condition("Has_Public_Ip", Equals(Ref(UsePublicIp), "True"))
    t.add_condition("Has_Bucket", Not(Equals(Ref(S3BucketName), "")))
    # Same lowercase-vs-"True" mismatch as Has_Public_Ip — TODO confirm.
    t.add_condition("create_elastic_ip", Equals(Ref(CreateElasticIP), "True"))
    # Wait handle signalled by the bootstrap script; stack blocks up to 25 min.
    nfswaithandle = t.add_resource(
        WaitConditionHandle('NFSInstanceWaitHandle'))
    nfswaitcondition = t.add_resource(
        WaitCondition("NFSInstanceWaitCondition",
                      Handle=Ref(nfswaithandle),
                      Timeout="1500",
                      DependsOn="NFSInstance"))
    # --- Outputs ------------------------------------------------------------
    t.add_output([
        Output("ElasticIP",
               Description="Elastic IP address for the instance",
               Value=Ref(EIPAddress),
               Condition="create_elastic_ip")
    ])
    t.add_output([
        Output("InstanceID", Description="Instance ID", Value=Ref(NFSInstance))
    ])
    t.add_output([
        Output("InstancePrivateIP", Value=GetAtt('NFSInstance', 'PrivateIp'))
    ])
    t.add_output([
        Output("InstancePublicIP",
               Value=GetAtt('NFSInstance', 'PublicIp'),
               Condition="Has_Public_Ip")
    ])
    t.add_output([
        Output("ElasticPublicIP",
               Value=GetAtt('NFSInstance', 'PublicIp'),
               Condition="create_elastic_ip")
    ])
    t.add_output([
        Output("PrivateMountPoint",
               Description="Mount point on private network",
               Value=Join("", [GetAtt('NFSInstance', 'PrivateIp'), ":/fs1"]))
    ])
    t.add_output([
        Output("ExampleClientMountCommands",
               Description="Example commands to mount NFS on the clients",
               Value=Join("", [
                   "sudo mkdir /nfs1; sudo mount ",
                   GetAtt('NFSInstance', 'PrivateIp'), ":/", Ref("ZfsPool"),
                   "/", Ref("ZfsMountPoint"), " /nfs1"
               ]))
    ])
    t.add_output([
        Output("S3BucketName",
               Value=(Ref("S3BucketName")),
               Condition="Has_Bucket")
    ])
    # "Volume01" : { "Value" : { "Ref" : "Volume01" } },
    # "Volume02" : { "Value" : { "Ref" : "Volume02" } },
    # "Volume03" : { "Value" : { "Ref" : "Volume03" } },
    # "Volume04" : { "Value" : { "Ref" : "Volume04" } },
    # "Volume05" : { "Value" : { "Ref" : "Volume05" } },
    # "Volume06" : { "Value" : { "Ref" : "Volume06" } },
    # "Volume07" : { "Value" : { "Ref" : "Volume07" } },
    # "Volume08" : { "Value" : { "Ref" : "Volume08" } },
    # "Volume09" : { "Value" : { "Ref" : "Volume09" } },
    # "Volume10" : { "Value" : { "Ref" : "Volume10" } }
    print(t.to_json(indent=2))
def flocker_docker_template(cluster_size, client_ami_map, node_ami_map):
    """
    Build a CloudFormation template (JSON string) for a Flocker + Docker +
    Docker Swarm cluster: one control node, ``cluster_size - 1`` agent nodes,
    and one client node that depends on all of them.

    :param int cluster_size: The number of nodes to create in the Flocker
        cluster (including control service node). Must be >= 1; with 0 the
        control-node reference below would be unbound — TODO confirm callers
        guarantee this.
    :param dict client_ami_map: A map between AWS region name and AWS AMI ID
        for the client.
    :param dict node_ami_map: A map between AWS region name and AWS AMI ID
        for the node.
    :returns: a CloudFormation template for a Flocker + Docker +
        Docker Swarm cluster.
    """
    # Base JSON template.
    template = Template()
    # Keys corresponding to CloudFormation user Inputs.
    access_key_id_param = template.add_parameter(
        Parameter(
            "AmazonAccessKeyID",
            Description="Required: Your Amazon AWS access key ID",
            Type="String",
            NoEcho=True,
            # NOTE(review): "\w" in a non-raw string is an invalid escape
            # (DeprecationWarning today, error in future Pythons) — should be
            # r"[\w]+". Left unchanged here.
            AllowedPattern="[\w]+",
            MinLength="16",
            MaxLength="32",
        ))
    secret_access_key_param = template.add_parameter(
        Parameter(
            "AmazonSecretAccessKey",
            Description="Required: Your Amazon AWS secret access key",
            Type="String",
            NoEcho=True,
            MinLength="1",
        ))
    keyname_param = template.add_parameter(
        Parameter(
            "EC2KeyPair",
            Description=
            "Required: Name of an existing EC2 KeyPair to enable SSH "
            "access to the instance",
            Type="AWS::EC2::KeyPair::KeyName",
        ))
    template.add_parameter(
        Parameter(
            "S3AccessPolicy",
            Description="Required: Is current IAM user allowed to access S3? "
            "S3 access is required to distribute Flocker and Docker "
            "configuration amongst stack nodes. Reference: "
            "http://docs.aws.amazon.com/IAM/latest/UserGuide/"
            "access_permissions.html Stack creation will fail if user "
            "cannot access S3",
            Type="String",
            AllowedValues=["Yes"],
        ))
    volumehub_token = template.add_parameter(
        Parameter(
            "VolumeHubToken",
            Description=("Optional: Your Volume Hub token. "
                         "You'll find the token at "
                         "https://volumehub.clusterhq.com/v1/token."),
            Type="String",
            Default="",
        ))
    template.add_mapping('RegionMapClient',
                         {k: {
                             "AMI": v
                         } for k, v in client_ami_map.items()})
    template.add_mapping('RegionMapNode',
                         {k: {
                             "AMI": v
                         } for k, v in node_ami_map.items()})
    # Select a random AvailabilityZone within given AWS Region.
    zone = Select(0, GetAZs(""))
    # S3 bucket to hold {Flocker, Docker, Swarm} configuration for distribution
    # between nodes.
    s3bucket = Bucket('ClusterConfig', DeletionPolicy='Retain')
    template.add_resource(s3bucket)
    # Create SecurityGroup for cluster instances.
    # NOTE(review): wide open — all TCP/UDP ports from 0.0.0.0/0.
    instance_sg = template.add_resource(
        ec2.SecurityGroup(
            "InstanceSecurityGroup",
            GroupDescription=(
                "Enable ingress access on all protocols and ports."),
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol=protocol,
                    FromPort="0",
                    ToPort="65535",
                    CidrIp="0.0.0.0/0",
                ) for protocol in ('tcp', 'udp')
            ]))
    # Base for post-boot {Flocker, Docker, Swarm} configuration on the nodes.
    base_user_data = [
        '#!/bin/bash\n',
        'aws_region="', Ref("AWS::Region"), '"\n',
        'aws_zone="', zone, '"\n',
        'access_key_id="', Ref(access_key_id_param), '"\n',
        'secret_access_key="', Ref(secret_access_key_param), '"\n',
        's3_bucket="', Ref(s3bucket), '"\n',
        'stack_name="', Ref("AWS::StackName"), '"\n',
        'volumehub_token="', Ref(volumehub_token), '"\n',
        'node_count="{}"\n'.format(cluster_size),
        'apt-get update\n',
    ]
    # XXX Flocker agents are indexed from 1 while the nodes overall are indexed
    # from 0.
    flocker_agent_number = 1
    # Gather WaitConditions
    wait_condition_names = []
    for i in range(cluster_size):
        # Node 0 is the control-service node; the rest are agents.
        if i == 0:
            node_name = CONTROL_NODE_NAME
        else:
            node_name = AGENT_NODE_NAME_TEMPLATE.format(index=i)
        # Create an EC2 instance for the {Agent, Control} Node.
        ec2_instance = ec2.Instance(node_name,
                                    ImageId=FindInMap("RegionMapNode",
                                                      Ref("AWS::Region"),
                                                      "AMI"),
                                    InstanceType="m3.large",
                                    KeyName=Ref(keyname_param),
                                    SecurityGroups=[Ref(instance_sg)],
                                    AvailabilityZone=zone,
                                    Tags=Tags(Name=node_name))
        # WaitCondition and corresponding Handler to signal completion
        # of {Flocker, Docker, Swarm} configuration on the node.
        wait_condition_handle = WaitConditionHandle(
            INFRA_WAIT_HANDLE_TEMPLATE.format(node=node_name))
        template.add_resource(wait_condition_handle)
        wait_condition = WaitCondition(
            INFRA_WAIT_CONDITION_TEMPLATE.format(node=node_name),
            Handle=Ref(wait_condition_handle),
            Timeout=NODE_CONFIGURATION_TIMEOUT,
        )
        template.add_resource(wait_condition)
        # Gather WaitConditions
        wait_condition_names.append(wait_condition.name)
        # Per-node user data: shared preamble plus node-specific variables.
        user_data = base_user_data[:]
        user_data += [
            'node_number="{}"\n'.format(i),
            'node_name="{}"\n'.format(node_name),
            'wait_condition_handle="', Ref(wait_condition_handle), '"\n',
        ]
        # Setup S3 utilities to push/pull node-specific data to/from S3 bucket.
        user_data += _sibling_lines(S3_SETUP)
        if i == 0:
            # Control Node configuration.
            control_service_instance = ec2_instance
            user_data += ['flocker_node_type="control"\n']
            user_data += _sibling_lines(FLOCKER_CONFIGURATION_GENERATOR)
            user_data += _sibling_lines(DOCKER_SWARM_CA_SETUP)
            user_data += _sibling_lines(DOCKER_SETUP)
            # Setup Swarm 1.0.1
            user_data += _sibling_lines(SWARM_MANAGER_SETUP)
            template.add_output([
                Output(
                    "ControlNodeIP",
                    Description="Public IP of Flocker Control and "
                    "Swarm Manager.",
                    Value=GetAtt(ec2_instance, "PublicIp"),
                )
            ])
        else:
            # Agent Node configuration. Agents boot after the control node.
            ec2_instance.DependsOn = control_service_instance.name
            user_data += [
                'flocker_node_type="agent"\n',
                'flocker_agent_number="{}"\n'.format(flocker_agent_number)
            ]
            flocker_agent_number += 1
            user_data += _sibling_lines(DOCKER_SETUP)
            # Setup Swarm 1.0.1
            user_data += _sibling_lines(SWARM_NODE_SETUP)
            template.add_output([
                Output(
                    "AgentNode{}IP".format(i),
                    Description=(
                        "Public IP of Agent Node for Flocker and Swarm."),
                    Value=GetAtt(ec2_instance, "PublicIp"),
                )
            ])
        user_data += _sibling_lines(FLOCKER_CONFIGURATION_GETTER)
        user_data += _sibling_lines(VOLUMEHUB_SETUP)
        user_data += _sibling_lines(SIGNAL_CONFIG_COMPLETION)
        ec2_instance.UserData = Base64(Join("", user_data))
        template.add_resource(ec2_instance)
    # Client Node creation.
    client_instance = ec2.Instance(CLIENT_NODE_NAME,
                                   ImageId=FindInMap("RegionMapClient",
                                                     Ref("AWS::Region"),
                                                     "AMI"),
                                   InstanceType="m3.medium",
                                   KeyName=Ref(keyname_param),
                                   SecurityGroups=[Ref(instance_sg)],
                                   AvailabilityZone=zone,
                                   Tags=Tags(Name=CLIENT_NODE_NAME))
    wait_condition_handle = WaitConditionHandle(CLIENT_WAIT_HANDLE)
    template.add_resource(wait_condition_handle)
    wait_condition = WaitCondition(
        CLIENT_WAIT_CONDITION,
        Handle=Ref(wait_condition_handle),
        Timeout=NODE_CONFIGURATION_TIMEOUT,
    )
    template.add_resource(wait_condition)
    # Client Node {Flockerctl, Docker-compose} configuration.
    user_data = base_user_data[:]
    user_data += [
        'wait_condition_handle="', Ref(wait_condition_handle), '"\n',
        'node_number="{}"\n'.format("-1"),
    ]
    user_data += _sibling_lines(S3_SETUP)
    user_data += _sibling_lines(CLIENT_SETUP)
    user_data += _sibling_lines(SIGNAL_CONFIG_COMPLETION)
    client_instance.UserData = Base64(Join("", user_data))
    # Start Client Node after Control Node and Agent Nodes are
    # up and running Flocker, Docker, Swarm stack.
    client_instance.DependsOn = wait_condition_names
    template.add_resource(client_instance)
    # List of Output fields upon successful creation of the stack.
    template.add_output([
        Output(
            "ClientNodeIP",
            Description="Public IP address of the client node.",
            Value=GetAtt(client_instance, "PublicIp"),
        )
    ])
    template.add_output(
        Output(
            "ClientConfigDockerSwarmHost",
            Value=Join("", [
                "export DOCKER_HOST=tcp://",
                GetAtt(control_service_instance, "PublicIp"), ":2376"
            ]),
            Description="Client config: Swarm Manager's DOCKER_HOST setting."))
    template.add_output(
        Output("ClientConfigDockerTLS",
               Value="export DOCKER_TLS_VERIFY=1",
               Description="Client config: Enable TLS client for Swarm."))
    return template.to_json()
def create_touch_wait_condition_handle(self):
    """Describe the template and register a handle that is never waited on."""
    template = self.template
    template.description = "touch waits for nothing and returns quickly"
    handle = WaitConditionHandle("touchNothing")
    template.add_resource(handle)