def set_mesh_name():
    """
    Resolve the mesh name for the template.

    Falls back to the CloudFormation stack name when the
    "use default mesh name" condition holds, otherwise uses the
    user-provided mesh name parameter.

    :return: troposphere ``If`` intrinsic selecting the mesh name.
    """
    return If(
        USE_DEFAULT_MESH_NAME_CON_T,
        Ref(AWS_STACK_NAME),
        Ref(MESH_NAME),
    )
def main(args):
    """
    Build the CloudFormation template for an FSx Lustre filesystem and
    write it as JSON to ``args.target_path``.

    :param args: parsed CLI arguments; ``args.target_path`` is the path of
        the JSON file to write.
    """
    t = Template()

    # ================= Parameters =================
    #        0          1             2                3                    4                     5           6
    # [shared_dir,fsx_fs_id,storage_capacity,fsx_kms_key_id,imported_file_chunk_size,export_path,import_path,
    #        7
    #  weekly_maintenance_start_time]
    fsx_options = t.add_parameter(
        Parameter(
            "FSXOptions",
            Type="CommaDelimitedList",
            Description="Comma separated list of fsx related options, 8 parameters in total, [shared_dir,fsx_fs_id,"
            "storage_capacity,fsx_kms_key_id,imported_file_chunk_size,export_path,import_path,"
            "weekly_maintenance_start_time]",
        )
    )

    compute_security_group = t.add_parameter(
        Parameter("ComputeSecurityGroup", Type="String", Description="SecurityGroup for FSx filesystem")
    )

    subnet_id = t.add_parameter(Parameter("SubnetId", Type="String", Description="SubnetId for FSx filesystem"))

    # ================= Conditions =================
    # Create a new filesystem only when a shared_dir was given (slot 0) and
    # no pre-existing filesystem id was passed (slot 1 == "NONE").
    create_fsx = t.add_condition(
        "CreateFSX",
        And(Not(Equals(Select(str(0), Ref(fsx_options)), "NONE")), Equals(Select(str(1), Ref(fsx_options)), "NONE")),
    )
    # Each optional FSx property is only set when its slot is not "NONE".
    use_storage_capacity = t.add_condition("UseStorageCap", Not(Equals(Select(str(2), Ref(fsx_options)), "NONE")))
    use_fsx_kms_key = t.add_condition("UseFSXKMSKey", Not(Equals(Select(str(3), Ref(fsx_options)), "NONE")))
    use_imported_file_chunk_size = t.add_condition(
        "UseImportedFileChunkSize", Not(Equals(Select(str(4), Ref(fsx_options)), "NONE"))
    )
    use_export_path = t.add_condition("UseExportPath", Not(Equals(Select(str(5), Ref(fsx_options)), "NONE")))
    use_import_path = t.add_condition("UseImportPath", Not(Equals(Select(str(6), Ref(fsx_options)), "NONE")))
    use_weekly_mainenance_start_time = t.add_condition(
        "UseWeeklyMaintenanceStartTime", Not(Equals(Select(str(7), Ref(fsx_options)), "NONE"))
    )

    # ================= Resources =================
    fs = t.add_resource(
        FileSystem(
            "FileSystem",
            FileSystemType="LUSTRE",
            SubnetIds=[Ref(subnet_id)],
            SecurityGroupIds=[Ref(compute_security_group)],
            KmsKeyId=If(use_fsx_kms_key, Select(str(3), Ref(fsx_options)), NoValue),
            StorageCapacity=If(use_storage_capacity, Select(str(2), Ref(fsx_options)), NoValue),
            LustreConfiguration=LustreConfiguration(
                ImportedFileChunkSize=If(use_imported_file_chunk_size, Select(str(4), Ref(fsx_options)), NoValue),
                ExportPath=If(use_export_path, Select(str(5), Ref(fsx_options)), NoValue),
                ImportPath=If(use_import_path, Select(str(6), Ref(fsx_options)), NoValue),
                WeeklyMaintenanceStartTime=If(
                    use_weekly_mainenance_start_time, Select(str(7), Ref(fsx_options)), NoValue
                ),
            ),
            Condition=create_fsx,
        )
    )

    # ================= Outputs =================
    t.add_output(
        Output(
            "FileSystemId",
            Description="ID of the FileSystem",
            Value=If(create_fsx, Ref(fs), Select("1", Ref(fsx_options))),
        )
    )

    # Specify output file path.  Use a context manager so the file is closed
    # even if serialization raises (original used bare open()/close(), which
    # leaked the handle on error).
    json_file_path = args.target_path
    with open(json_file_path, "w") as output_file:
        output_file.write(t.to_json())
# --- ACM certificate -------------------------------------------------------
# Parameter controlling how ACM validates domain ownership before issuing
# the SSL certificate.  Stack creation pauses until validation completes.
certificate_validation_method = template.add_parameter(
    Parameter(
        title="CertificateValidationMethod",
        Default="DNS",
        AllowedValues=['DNS', 'Email'],
        Type='String',
        Description=""
        "How to validate domain ownership for issuing an SSL certificate - "
        "highly recommend DNS. Either way, stack creation will pause until "
        "you do something to complete the validation."
    ),
    group="Global",
    label="Certificate Validation Method"
)

# The certificate resource itself.  Alternate names are dropped entirely
# (AWS::NoValue) when the "no alternate domains" condition holds.
application = Ref(template.add_resource(
    Certificate(
        'Certificate',
        DomainName=domain_name,
        SubjectAlternativeNames=If(no_alt_domains, Ref("AWS::NoValue"), domain_name_alternates),
        DomainValidationOptions=[
            DomainValidationOption(
                DomainName=domain_name,
                ValidationDomain=domain_name,
            ),
        ],
        ValidationMethod=Ref(certificate_validation_method)
    )
))
Default='Suspended', Type='String', AllowedValues=['Enabled', 'Suspended'])) # # Condition # t.add_condition('HasBucketName', Not(Equals(Ref(param_bucket_name), ''))) t.add_condition('IsChinaRegion', Equals(Ref(AWS_REGION), 'cn-north-1')) # # Resource # bucket = t.add_resource( s3.Bucket('Bucket', BucketName=If('HasBucketName', Ref(param_bucket_name), Ref(AWS_NO_VALUE)), VersioningConfiguration=s3.VersioningConfiguration( Status=Ref(param_versioning)), LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[ s3.LifecycleRule( Id='S3BucketRule1', Prefix='', Status='Enabled', Transitions=[ s3.LifecycleRuleTransition( StorageClass='STANDARD_IA', TransitionInDays=365, ), ], NoncurrentVersionExpirationInDays=90, NoncurrentVersionTransitions=[
], Resource=['*']), aws.Statement(Effect=aws.Allow, Action=[ aws.Action('logs', 'CreateLogGroup'), aws.Action('logs', 'CreateLogStream'), aws.Action('logs', 'PutLogEvents'), ], Resource=['arn:aws:logs:*:*:*']), If( "UseEncryption", Ref(AWS_NO_VALUE), aws.Statement( Effect=aws.Allow, Action=[ aws. Action('kms', 'Create*' ), # Don't ask me why this is needed... aws.Action('kms', 'DescribeKey'), ], Resource=[Ref(kms_key_parameter)]), ), ])) ])) backup_rds_function = template.add_resource( awslambda.Function( 'LambdaBackupRDSFunction', Description='Copies RDS backups to another region', Code=awslambda.Code( S3Bucket=Ref(s3_bucket_parameter),
DBName=Ref(db_name), Condition=db_condition, AllocatedStorage=Ref(db_allocated_storage), DBInstanceClass=Ref(db_class), Engine=Ref(db_engine), EngineVersion=Ref(db_engine_version), MultiAZ=Ref(db_multi_az), StorageEncrypted=use_aes256_encryption, StorageType="gp2", MasterUsername=Ref(db_user), MasterUserPassword=Ref(db_password), DBSubnetGroupName=Ref(db_subnet_group), VPCSecurityGroups=[Ref(db_security_group)], DBParameterGroupName=Ref(db_parameter_group), BackupRetentionPeriod=Ref(db_backup_retention_days), EnableCloudwatchLogsExports=If(db_logging_condition, Ref(db_logging), Ref("AWS::NoValue")), DeletionPolicy="Snapshot", KmsKeyId=If(use_cmk_arn, Ref(cmk_arn), Ref("AWS::NoValue")), ) db_replica = rds.DBInstance( "DatabaseReplica", template=template, Condition=db_replication_condition, SourceDBInstanceIdentifier=Ref(db_instance), DBInstanceClass=Ref(db_class), Engine=Ref(db_engine), VPCSecurityGroups=[Ref(db_security_group)], ) db_url = If(
def create_template(self):
    """Create template (main function called by Stacker).

    Builds the static-website stack: a private S3 bucket fronted by a
    CloudFront distribution, with an optional Lambda@Edge directory-index
    rewrite, optional logging, WAF association and ACM certificate.
    """
    template = self.template
    variables = self.get_variables()
    template.add_version('2010-09-09')
    template.add_description('Static Website - Bucket and Distribution')

    # Conditions
    # NOTE(review): each optional variable may arrive as '' or the literal
    # string 'undefined'; both are treated as "not specified".
    template.add_condition(
        'AcmCertSpecified',
        And(Not(Equals(variables['AcmCertificateArn'].ref, '')),
            Not(Equals(variables['AcmCertificateArn'].ref, 'undefined'))))
    template.add_condition(
        'AliasesSpecified',
        And(Not(Equals(Select(0, variables['Aliases'].ref), '')),
            Not(Equals(Select(0, variables['Aliases'].ref), 'undefined'))))
    template.add_condition(
        'CFLoggingEnabled',
        And(Not(Equals(variables['LogBucketName'].ref, '')),
            Not(Equals(variables['LogBucketName'].ref, 'undefined'))))
    template.add_condition(
        'DirectoryIndexSpecified',
        And(Not(Equals(variables['RewriteDirectoryIndex'].ref, '')),
            Not(Equals(variables['RewriteDirectoryIndex'].ref, 'undefined')))  # noqa
    )
    template.add_condition(
        'WAFNameSpecified',
        And(Not(Equals(variables['WAFWebACL'].ref, '')),
            Not(Equals(variables['WAFWebACL'].ref, 'undefined'))))

    # Resources
    # Origin access identity so CloudFront (and only CloudFront) can read
    # from the private website bucket.
    oai = template.add_resource(
        cloudfront.CloudFrontOriginAccessIdentity(
            'OAI',
            CloudFrontOriginAccessIdentityConfig=cloudfront.
            CloudFrontOriginAccessIdentityConfig(  # noqa pylint: disable=line-too-long
                Comment='CF access to website')))

    bucket = template.add_resource(
        s3.Bucket(
            'Bucket',
            AccessControl=s3.Private,
            LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
                s3.LifecycleRule(NoncurrentVersionExpirationInDays=90,
                                 Status='Enabled')
            ]),
            VersioningConfiguration=s3.VersioningConfiguration(
                Status='Enabled'),
            WebsiteConfiguration=s3.WebsiteConfiguration(
                IndexDocument='index.html', ErrorDocument='error.html')))
    template.add_output(
        Output('BucketName',
               Description='Name of website bucket',
               Value=bucket.ref()))

    # Bucket policy granting the OAI read access to all objects.
    allowcfaccess = template.add_resource(
        s3.BucketPolicy(
            'AllowCFAccess',
            Bucket=bucket.ref(),
            PolicyDocument=PolicyDocument(
                Version='2012-10-17',
                Statement=[
                    Statement(
                        Action=[awacs.s3.GetObject],
                        Effect=Allow,
                        Principal=Principal(
                            'CanonicalUser',
                            oai.get_att('S3CanonicalUserId')),
                        Resource=[Join('', [bucket.get_att('Arn'), '/*'])])
                ])))

    # Lambda@Edge role + function rewriting "/dir/" requests to
    # "/dir/<RewriteDirectoryIndex>"; only created when the variable is set.
    cfdirectoryindexrewriterole = template.add_resource(
        iam.Role('CFDirectoryIndexRewriteRole',
                 Condition='DirectoryIndexSpecified',
                 AssumeRolePolicyDocument=PolicyDocument(
                     Version='2012-10-17',
                     Statement=[
                         Statement(Effect=Allow,
                                   Action=[awacs.sts.AssumeRole],
                                   Principal=Principal(
                                       'Service', [
                                           'lambda.amazonaws.com',
                                           'edgelambda.amazonaws.com'
                                       ]))
                     ]),
                 ManagedPolicyArns=[
                     IAM_ARN_PREFIX + 'AWSLambdaBasicExecutionRole'
                 ]))

    cfdirectoryindexrewrite = template.add_resource(
        awslambda.Function(
            'CFDirectoryIndexRewrite',
            Condition='DirectoryIndexSpecified',
            Code=awslambda.Code(ZipFile=Join(
                '', [
                    "'use strict';\n",
                    "exports.handler = (event, context, callback) => {\n",
                    "\n",
                    " // Extract the request from the CloudFront event that is sent to Lambda@Edge\n",  # noqa pylint: disable=line-too-long
                    " var request = event.Records[0].cf.request;\n",
                    " // Extract the URI from the request\n",
                    " var olduri = request.uri;\n",
                    " // Match any '/' that occurs at the end of a URI. Replace it with a default index\n",  # noqa pylint: disable=line-too-long
                    " var newuri = olduri.replace(/\\/$/, '\\/",
                    variables['RewriteDirectoryIndex'].ref,
                    "');\n",  # noqa
                    " // Log the URI as received by CloudFront and the new URI to be used to fetch from origin\n",  # noqa pylint: disable=line-too-long
                    " console.log(\"Old URI: \" + olduri);\n",
                    " console.log(\"New URI: \" + newuri);\n",
                    " // Replace the received URI with the URI that includes the index page\n",  # noqa pylint: disable=line-too-long
                    " request.uri = newuri;\n",
                    " // Return to CloudFront\n",
                    " return callback(null, request);\n",
                    "\n",
                    "};\n"
                ])),
            Description=
            'Rewrites CF directory HTTP requests to default page',  # noqa
            Handler='index.handler',
            Role=cfdirectoryindexrewriterole.get_att('Arn'),
            Runtime='nodejs8.10'))

    # Generating a unique resource name here for the Lambda version, so it
    # updates automatically if the lambda code changes
    code_hash = hashlib.md5(
        str(cfdirectoryindexrewrite.properties['Code'].
            properties['ZipFile'].to_dict()).encode()  # noqa pylint: disable=line-too-long
    ).hexdigest()

    cfdirectoryindexrewritever = template.add_resource(
        awslambda.Version('CFDirectoryIndexRewriteVer' + code_hash,
                          Condition='DirectoryIndexSpecified',
                          FunctionName=cfdirectoryindexrewrite.ref()))

    # The CloudFront distribution; every optional feature is gated on its
    # condition via If(..., NoValue) so the property is omitted when unset.
    cfdistribution = template.add_resource(
        cloudfront.Distribution(
            'CFDistribution',
            DependsOn=allowcfaccess.title,
            DistributionConfig=cloudfront.DistributionConfig(
                Aliases=If('AliasesSpecified', variables['Aliases'].ref,
                           NoValue),
                Origins=[
                    cloudfront.Origin(
                        DomainName=Join(
                            '.', [bucket.ref(), 's3.amazonaws.com']),
                        S3OriginConfig=cloudfront.S3Origin(
                            OriginAccessIdentity=Join(
                                '', [
                                    'origin-access-identity/cloudfront/',
                                    oai.ref()
                                ])),
                        Id='S3Origin')
                ],
                DefaultCacheBehavior=cloudfront.DefaultCacheBehavior(
                    AllowedMethods=['GET', 'HEAD'],
                    Compress=False,
                    DefaultTTL='86400',
                    ForwardedValues=cloudfront.ForwardedValues(
                        Cookies=cloudfront.Cookies(Forward='none'),
                        QueryString=False,
                    ),
                    LambdaFunctionAssociations=If(
                        'DirectoryIndexSpecified',
                        [
                            cloudfront.LambdaFunctionAssociation(
                                EventType='origin-request',
                                LambdaFunctionARN=cfdirectoryindexrewritever
                                .ref()  # noqa
                            )
                        ],
                        NoValue),
                    TargetOriginId='S3Origin',
                    ViewerProtocolPolicy='redirect-to-https'),
                DefaultRootObject='index.html',
                Logging=If(
                    'CFLoggingEnabled',
                    cloudfront.Logging(Bucket=Join('.', [
                        variables['LogBucketName'].ref, 's3.amazonaws.com'
                    ])),
                    NoValue),
                PriceClass=variables['PriceClass'].ref,
                Enabled=True,
                WebACLId=If('WAFNameSpecified',
                            variables['WAFWebACL'].ref,
                            NoValue),
                ViewerCertificate=If(
                    'AcmCertSpecified',
                    cloudfront.ViewerCertificate(
                        AcmCertificateArn=variables['AcmCertificateArn'].
                        ref,  # noqa
                        SslSupportMethod='sni-only'),
                    NoValue))))

    template.add_output(
        Output('CFDistributionId',
               Description='CloudFront distribution ID',
               Value=cfdistribution.ref()))
    template.add_output(
        Output('CFDistributionDomainName',
               Description='CloudFront distribution domain name',
               Value=cfdistribution.get_att('DomainName')))
def attach(self):
    """Attaches a bootstrapped Chef Node EC2 instance to an AWS
    CloudFormation template and returns the template.
    """
    # Attach shared parameter / resource definitions first.
    parameters = ec2_parameters.EC2Parameters(self.template)
    parameters.attach()
    resources = ec2_resources.EC2Resources(self.template)
    resources.attach()

    # Security group: SSH restricted to the SSHLocation parameter,
    # HTTP (80) and 8080 open to the world.
    security_group = self.template.add_resource(ec2.SecurityGroup(
        'SecurityGroup',
        GroupDescription='Allows SSH access from anywhere',
        SecurityGroupIngress=[
            ec2.SecurityGroupRule(
                IpProtocol='tcp',
                FromPort=22,
                ToPort=22,
                CidrIp=Ref(self.template.parameters['SSHLocation'])
            ),
            ec2.SecurityGroupRule(
                IpProtocol='tcp',
                FromPort=80,
                ToPort=80,
                CidrIp='0.0.0.0/0'
            ),
            ec2.SecurityGroupRule(
                IpProtocol='tcp',
                FromPort=8080,
                ToPort=8080,
                CidrIp='0.0.0.0/0'
            )
        ],
        VpcId=ImportValue("prod2-VPCID"),
        Tags=Tags(
            Name='{0}SecurityGroup'.format(EC2_INSTANCE_NAME)
        )
    ))

    self.template.add_resource(ec2.Instance(
        EC2_INSTANCE_NAME,
        # AMI selection depends on the IsCentos7 condition; both maps are
        # keyed by region plus the architecture derived from InstanceType.
        ImageId=If(
            'IsCentos7',
            FindInMap(
                "AWSRegionArch2Centos7LinuxAMI", Ref("AWS::Region"),
                FindInMap("AWSInstanceType2Arch",
                          Ref(self.template.parameters['InstanceType']),
                          "Arch")),
            FindInMap(
                "AWSRegionArch2AmazonLinuxAMI", Ref("AWS::Region"),
                FindInMap("AWSInstanceType2Arch",
                          Ref(self.template.parameters['InstanceType']),
                          "Arch"))
        ),
        InstanceType=Ref(self.template.parameters['InstanceType']),
        KeyName=FindInMap('Region2KeyPair', Ref('AWS::Region'), 'key'),
        SecurityGroupIds=[Ref(security_group)],
        SubnetId=ImportValue("prod2-SubnetPublicAZ2"),
        IamInstanceProfile=Ref(
            self.template.resources['InstanceProfileResource']),
        # Userdata: OS-specific bootstrap (CentOS installs cfn-bootstrap
        # from source) followed by the common cfn-init / cfn-signal calls.
        UserData=Base64(Join('', [
            If('IsCentos7',
               Join('\n', [
                   '#!/bin/bash ',
                   'sudo yum update -y ',
                   'sudo yum install -y vim ',
                   'sudo yum install -y epel-release ',
                   'sudo yum install -y awscli ',
                   '# Install CFN-BootStrap ',
                   ('/usr/bin/easy_install --script-dir /opt/aws/bin '
                    'https://s3.amazonaws.com/cloudformation-examples/'
                    'aws-cfn-bootstrap-latest.tar.gz '),
                   ('cp -v /usr/lib/python2*/site-packages/aws_cfn_'
                    'bootstrap*/init/redhat/cfn-hup /etc/init.d '),
                   'chmod +x /etc/init.d/cfn-hup ',
               ]),
               Join('\n', [
                   '#!/bin/bash -xe ',
                   'yum update -y ',
                   '# Update CFN-BootStrap ',
                   'yum update -y aws-cfn-bootstrap',
                   'sudo yum install -y awslogs ',
               ])),
            Join('', [
                '# Install the files and packages from the metadata\n'
                '/opt/aws/bin/cfn-init -v ',
                ' --stack ', Ref('AWS::StackName'),
                ' --resource ', EC2_INSTANCE_NAME,
                ' --configsets InstallAndRun',
                ' --region ', Ref('AWS::Region'),
                ' --role ', Ref(self.template.resources['RoleResource']),
                '\n',
                '# Signal the status from cfn-init\n',
                '/opt/aws/bin/cfn-signal -e $? '
                ' --stack ', Ref('AWS::StackName'),
                ' --resource ', EC2_INSTANCE_NAME,
                ' --region ', Ref('AWS::Region'),
                ' --role ', Ref(self.template.resources['RoleResource']),
                '\n'
            ]),
        ]
        )
        ),
        # cfn-init metadata: InstallAndRun configset runs Install,
        # InstallLogs, InstallChef then Configure in order.
        Metadata=cloudformation.Metadata(
            cloudformation.Init(
                cloudformation.InitConfigSets(
                    InstallAndRun=['Install', 'InstallLogs',
                                   'InstallChef', 'Configure']
                ),
                Install=cloudformation.InitConfig(
                    packages={
                        'yum': {
                            'stress': [],
                            'docker': []
                        }
                    },
                    files={
                        '/etc/cfn/cfn-hup.conf': {
                            'content': Join('\n', [
                                '[main]',
                                'stack={{stackid}}',
                                'region={{region}}',
                                'interval=1'
                            ]),
                            'context': {
                                'stackid': Ref('AWS::StackId'),
                                'region': Ref('AWS::Region')
                            },
                            'mode': '000400',
                            'owner': 'root',
                            'group': 'root'
                        },
                        '/etc/cfn/hooks.d/cfn-auto-reloader.conf': {
                            'content': Join('\n', [
                                '[cfn-auto-reloader-hook]',
                                'triggers=post.update',
                                ('path=Resources.{{instance_name}}'
                                 '.Metadata'
                                 '.AWS::CloudFormation::Init'),
                                ('action=/opt/aws/bin/cfn-init -v '
                                 ' --stack {{stack_name}} '
                                 ' --resource {{instance_name}} '
                                 ' --configsets {{config_sets}} '
                                 ' --region {{region}} '),
                                'runas={{run_as}}'
                            ]),
                            'context': {
                                'instance_name': EC2_INSTANCE_NAME,
                                'stack_name': Ref('AWS::StackName'),
                                'region': Ref('AWS::Region'),
                                'config_sets': 'InstallAndRun',
                                'run_as': 'root'
                            }
                        }
                    },
                    services={
                        'sysvinit': {
                            'docker': {
                                'enabled': 'true',
                                'ensureRunning': 'true'
                            },
                            'cfn-hup': {
                                'enabled': 'true',
                                'ensureRunning': 'true'
                            }
                        }
                    },
                    commands={
                        '01_test': {
                            'command': 'echo "$CFNTEST" > Install.txt',
                            'env': {
                                'CFNTEST': 'I come from Install.'
                            },
                            'cwd': '~'
                        }
                    }
                ),
                InstallLogs=cloudformation.InitConfig(
                    files={
                        '/etc/awslogs/awslogs.conf': {
                            'content': Join('\n', [
                                '[general]',
                                ('state_file= /var/awslogs/'
                                 'state/agent-state'),
                                '',
                                '[/var/log/cloud-init.log]',
                                'file = /var/log/cloud-init.log',
                                'log_group_name = {{log_group_name}}',
                                ('log_stream_name = '
                                 '{instance_id}/cloud-init.log'),
                                'datetime_format = {{datetime_format}}',
                                '',
                                '[/var/log/cloud-init-output.log]',
                                'file = /var/log/cloud-init-output.log',
                                'log_group_name = {{log_group_name}}',
                                ('log_stream_name = '
                                 '{instance_id}/cloud-init-output.log'),
                                'datetime_format = {{datetime_format}}',
                                '',
                                '[/var/log/cfn-init.log]',
                                'file = /var/log/cfn-init.log',
                                'log_group_name = {{log_group_name}}',
                                ('log_stream_name = '
                                 '{instance_id}/cfn-init.log'),
                                'datetime_format = {{datetime_format}}',
                                '',
                                '[/var/log/cfn-hup.log]',
                                'file = /var/log/cfn-hup.log',
                                'log_group_name = {{log_group_name}}',
                                ('log_stream_name = '
                                 '{instance_id}/cfn-hup.log'),
                                'datetime_format = {{datetime_format}}',
                                '',
                                '[/var/log/cfn-wire.log]',
                                'file = /var/log/cfn-wire.log',
                                'log_group_name = {{log_group_name}}',
                                ('log_stream_name = '
                                 '{instance_id}/cfn-wire.log'),
                                'datetime_format = {{datetime_format}}',
                                '',
                                '[/var/log/httpd]',
                                'file = /var/log/httpd/*',
                                'log_group_name = {{log_group_name}}',
                                ('log_stream_name = '
                                 '{instance_id}/httpd'),
                                'datetime_format = {{datetime_format}}'
                            ]),
                            'context': {
                                'log_group_name': Ref(
                                    self.template.resources[
                                        'LogGroupResource']),
                                'datetime_format': '%d/%b/%Y:%H:%M:%S'
                            }
                        },
                        '/etc/awslogs/awscli.conf': {
                            'content': Join('\n', [
                                '[plugins]',
                                'cwlogs = cwlogs',
                                '[default]',
                                'region = {{region}}'
                            ]),
                            'context': {
                                'region': Ref('AWS::Region')
                            },
                            'mode': '000444',
                            'owner': 'root',
                            'group': 'root'
                        }
                    },
                    commands={
                        '01_create_state_directory': {
                            'command': 'mkdir -p /var/awslogs/state'
                        },
                        '02_test': {
                            'command': 'echo "$CFNTEST" > InstallLogs.txt',
                            'env': {
                                'CFNTEST': 'I come from install_logs.'
                            },
                            'cwd': '~'
                        },
                        '03_install_aws_logs_if_centos': {
                            # CentOS lacks the awslogs yum package, so it is
                            # installed from the setup script instead.
                            'command': If('IsCentos7', Join('\n', [
                                ('curl https://s3.amazonaws.com/aws-'
                                 'cloudwatch/downloads/latest/awslogs-'
                                 'agent-setup.py -O'),
                                Join('', [
                                    'sudo python ./awslogs-agent-setup.py',
                                    ' --configfile /etc/awslogs/awslogs',
                                    '.conf --non-interactive --region ',
                                    Ref('AWS::Region')])
                            ]), Join('', [
                                'echo "not installing awslogs from ',
                                'from source"'
                            ]))
                        }
                    },
                    services={
                        'sysvinit': {
                            'awslogs': {
                                'enabled': 'true',
                                'ensureRunning': 'true',
                                'files': ['/etc/awslogs/awslogs.conf']
                            }
                        }
                    }
                ),
                InstallChef=cloudformation.InitConfig(
                    commands={
                        '01_invoke_omnitruck_install': {
                            'command': (
                                'curl -L '
                                'https://omnitruck.chef.io/install.sh | '
                                'bash'
                            ),
                        }
                    },
                    # Chef configuration files are pulled from S3 using the
                    # S3AccessCreds authentication block declared below.
                    files={
                        '/etc/chef/client.rb': {
                            'source': S3_CLIENT_RB,
                            'mode': '000600',
                            'owner': 'root',
                            'group': 'root',
                            'authentication': 'S3AccessCreds'
                        },
                        '/etc/chef/jasondebolt-validator.pem': {
                            'source': S3_VALIDATOR_PEM,
                            'mode': '000600',
                            'owner': 'root',
                            'group': 'root',
                            'authentication': 'S3AccessCreds'
                        },
                        '/etc/chef/first-run.json': {
                            'source': S3_FIRST_RUN,
                            'mode': '000600',
                            'owner': 'root',
                            'group': 'root',
                            'authentication': 'S3AccessCreds'
                        }
                    }
                ),
                Configure=cloudformation.InitConfig(
                    commands={
                        '01_test': {
                            'command': 'echo "$CFNTEST" > Configure.txt',
                            'env': {
                                'CFNTEST': 'I come from Configure.'
                            },
                            'cwd': '~'
                        },
                        '02_chef_bootstrap': {
                            'command': (
                                'chef-client -j '
                                '/etc/chef/first-run.json'
                            )
                        }
                    }
                )
            ),
            cloudformation.Authentication({
                'S3AccessCreds': cloudformation.AuthenticationBlock(
                    type='S3',
                    roleName=Ref(self.template.resources['RoleResource']))
            })
        ),
        Tags=Tags(
            Name=Ref('AWS::StackName'),
            env='ops'
        )
    ))

    self.template.add_output(Output(
        'PublicIp',
        Description='Public IP of the newly created EC2 instance',
        Value=GetAtt(EC2_INSTANCE_NAME, 'PublicIp')
    ))
    self.template.add_output(Output(
        'LinuxType',
        Description='The linux type of the EC2 instance.',
        Value=If('IsCentos7', 'centos_7', 'amazon_linux')
    ))
    return self.template
load_balancer = elb.LoadBalancer( 'LoadBalancer', template=template, Subnets=[ Ref(loadbalancer_a_subnet), Ref(loadbalancer_b_subnet), ], SecurityGroups=[Ref(load_balancer_security_group)], Listeners=listeners, HealthCheck=elb.HealthCheck( Target=If( tcp_health_check_condition, Join("", ["TCP:", web_worker_port]), Join("", [ web_worker_protocol, ":", web_worker_port, web_worker_health_check, ]), ), HealthyThreshold="2", UnhealthyThreshold="2", Interval="100", Timeout="10", ), CrossZone=True, ) template.add_output( Output("LoadBalancerDNSName", Description="Loadbalancer DNS",
def create_network(self):
    """Create the VPC network layout: ACL, subnets, route tables, routes.

    One public and one private subnet is created per requested
    availability zone.  Public subnets route 0.0.0.0/0 through the
    internet gateway and host the NAT instances; private subnets route
    through either a NAT instance or a NAT gateway, selected at deploy
    time via the ``UseNatInstances`` / ``UseNatGateway`` conditions.
    """
    t = self.template
    self.create_gateway()
    vpc_id = Ref("VPC")
    t.add_resource(ec2.NetworkAcl('DefaultACL', VpcId=vpc_id))

    self.create_nat_security_groups()
    subnets = {'public': [], 'private': []}
    net_types = subnets.keys()
    zones = []
    for i in range(self.local_parameters["AZCount"]):
        az = Select(i, GetAZs(""))
        zones.append(az)
        name_suffix = i
        for net_type in net_types:
            name_prefix = net_type.capitalize()
            subnet_name = "%sSubnet%s" % (name_prefix, name_suffix)
            subnets[net_type].append(subnet_name)
            # Subnet CIDRs come from the "<Type>Subnets" list parameter,
            # one entry per availability zone.
            t.add_resource(
                ec2.Subnet(subnet_name,
                           AvailabilityZone=az,
                           VpcId=vpc_id,
                           DependsOn=GW_ATTACH,
                           CidrBlock=Select(i, Ref("%sSubnets" % name_prefix)),
                           Tags=Tags(type=net_type)))
            route_table_name = "%sRouteTable%s" % (name_prefix, name_suffix)
            t.add_resource(
                ec2.RouteTable(route_table_name,
                               VpcId=vpc_id,
                               Tags=[ec2.Tag('type', net_type)]))
            t.add_resource(
                ec2.SubnetRouteTableAssociation(
                    "%sRouteTableAssociation%s" % (name_prefix, name_suffix),
                    SubnetId=Ref(subnet_name),
                    RouteTableId=Ref(route_table_name)))

            route_name = '%sRoute%s' % (name_prefix, name_suffix)
            if net_type == 'public':
                # the public subnets are where the NAT instances live,
                # so their default route needs to go to the AWS
                # Internet Gateway
                t.add_resource(
                    ec2.Route(route_name,
                              RouteTableId=Ref(route_table_name),
                              DestinationCidrBlock="0.0.0.0/0",
                              GatewayId=Ref(GATEWAY)))
                self.create_nat_instance(i, subnet_name)
            else:
                # Private subnets are where actual instances will live
                # so their gateway needs to be through the nat instances
                # (exactly one of InstanceId / NatGatewayId resolves; the
                # other collapses to AWS::NoValue).
                t.add_resource(
                    ec2.Route(
                        route_name,
                        RouteTableId=Ref(route_table_name),
                        DestinationCidrBlock='0.0.0.0/0',
                        InstanceId=If(
                            "UseNatInstances",
                            Ref(NAT_INSTANCE_NAME % name_suffix),
                            Ref("AWS::NoValue")),
                        NatGatewayId=If(
                            "UseNatGateway",
                            Ref(NAT_GATEWAY_NAME % name_suffix),
                            Ref("AWS::NoValue"))))

    # Export the subnet ids and the AZ list for downstream stacks.
    for net_type in net_types:
        t.add_output(
            Output("%sSubnets" % net_type.capitalize(),
                   Value=Join(",", [Ref(sn) for sn in subnets[net_type]])))
    self.template.add_output(
        Output("AvailabilityZones", Value=Join(",", zones)))
def add_default_instance_definition(db, for_cluster=False):
    """
    Function to add DB Instance(s)

    Builds the default ``AWS::RDS::DBInstance`` properties.  Properties
    that only make sense for a standalone instance (storage, subnet group,
    credentials, security groups) are suppressed with ``NoValue`` when the
    DB-cluster condition holds, since the cluster owns them instead.

    :param ecs_composex.rds.rds_stack.Rds db:
    :param bool for_cluster: Whether this instance is added with default values for a DB Cluster
    :return: the DBInstance resource
    :rtype: DBInstance
    """
    # NOTE: the original mixed `NoValue` and `Ref(AWS_NO_VALUE)` (they are
    # the same intrinsic); normalized to `NoValue` for consistency.
    props = {
        "Engine": Ref(DB_ENGINE_NAME),
        "EngineVersion": If(
            rds_conditions.USE_CLUSTER_CON_T,
            NoValue,
            Ref(DB_ENGINE_VERSION),
        ),
        "StorageType": If(
            rds_conditions.USE_CLUSTER_CON_T,
            NoValue,
            Ref(DB_STORAGE_TYPE),
        ),
        "DBSubnetGroupName": If(
            rds_conditions.NOT_USE_CLUSTER_CON_T,
            Ref(db.db_subnet_group),
            NoValue,
        ),
        "AllocatedStorage": If(
            rds_conditions.USE_CLUSTER_CON_T,
            NoValue,
            Ref(DB_STORAGE_CAPACITY),
        ),
        "DBInstanceClass": Ref(DB_INSTANCE_CLASS),
        # Credentials resolve dynamically from the Secrets Manager secret;
        # they are omitted when a cluster (or snapshot) provides them.
        "MasterUsername": If(
            rds_conditions.USE_CLUSTER_OR_SNAPSHOT_CON_T,
            NoValue,
            Sub(
                f"{{{{resolve:secretsmanager:${{{db.db_secret.title}}}:SecretString:username}}}}"
            ),
        ),
        "DBClusterIdentifier": If(
            rds_conditions.USE_CLUSTER_CON_T,
            Ref(db.cfn_resource),
            NoValue,
        ),
        "MasterUserPassword": If(
            rds_conditions.USE_CLUSTER_CON_T,
            NoValue,
            Sub(
                f"{{{{resolve:secretsmanager:${{{db.db_secret.title}}}:SecretString:password}}}}"
            ),
        ),
        "VPCSecurityGroups": If(
            rds_conditions.USE_CLUSTER_CON_T,
            NoValue,
            [GetAtt(db.db_sg, "GroupId")],
        ),
        "Tags": Tags(SecretName=Ref(db.db_secret), Name=db.logical_name),
        "StorageEncrypted": True,
    }
    if db.parameters and keyisset("MultiAZ", db.parameters):
        props["MultiAZ"] = True
    # Encryption is configured on the DB cluster itself when one is used,
    # so drop the instance-level property in that case.
    if for_cluster and keyisset("StorageEncrypted", props):
        del props["StorageEncrypted"]
    instance = DBInstance(f"Instance{db.logical_name}", **props)
    return instance
def add_resources(self):
    """Add resources to template.

    Builds the customer VPN stack: an IAM role/profile for the VPN
    server, optional AMI and subnet lookup custom resources, a launch
    configuration whose cloud-config userdata bootstraps routing and
    Chef, and a single-instance auto-scaling group.
    """
    template = self.template
    variables = self.get_variables()

    # IAM role: route-table manipulation (for VPN routing) plus scoped
    # read/write access to the customer's Chef data bucket.
    vpnrole = template.add_resource(
        iam.Role(
            'VPNRole',
            AssumeRolePolicyDocument=iam_policies.assumerolepolicy('ec2'),
            ManagedPolicyArns=variables['VPNManagedPolicies'].ref,
            Path='/',
            Policies=[
                iam.Policy(
                    PolicyName=Join('-', [
                        'customer-vpn-server-role',
                        variables['EnvironmentName'].ref,
                        variables['CustomerName'].ref
                    ]),
                    PolicyDocument=Policy(
                        Version='2012-10-17',
                        Statement=[
                            # ModifyInstanceAttribute is for src/dst check
                            Statement(Action=[
                                awacs.ec2.DescribeRouteTables,
                                awacs.ec2.DescribeAddresses,
                                awacs.ec2.AssociateAddress,
                                awacs.ec2.CreateRoute,
                                awacs.ec2.ReplaceRoute,
                                awacs.ec2.ModifyInstanceAttribute
                            ],
                                      Effect=Allow,
                                      Resource=['*']),
                            Statement(
                                Action=[
                                    awacs.aws.Action('s3', 'Get*'),
                                    awacs.aws.Action('s3', 'List*'),
                                    awacs.aws.Action('s3', 'Put*')
                                ],
                                Effect=Allow,
                                Resource=[
                                    Join(
                                        '',
                                        [
                                            'arn:aws:s3:::',
                                            variables['ChefDataBucketName']
                                            .ref,  # noqa pylint: disable=line-too-long
                                            '/',
                                            variables['EnvironmentName'].
                                            ref,
                                            '/',
                                            variables['BucketKey'].ref,
                                            '/*'
                                        ])
                                ]),
                            Statement(
                                Action=[awacs.s3.ListBucket],
                                Effect=Allow,
                                Resource=[
                                    Join('', [
                                        'arn:aws:s3:::',
                                        variables['ChefDataBucketName'].ref
                                    ])  # noqa pylint: disable=line-too-long
                                ],
                                Condition=Condition(
                                    StringLike(
                                        's3:prefix', [
                                            Join(
                                                '',
                                                [
                                                    variables[
                                                        'EnvironmentName'].
                                                    ref,  # noqa pylint: disable=line-too-long
                                                    '/',
                                                    variables['BucketKey'].
                                                    ref,  # noqa pylint: disable=line-too-long
                                                    '/*'
                                                ])
                                        ])))
                        ]))
            ]))
    vpninstanceprofile = template.add_resource(
        iam.InstanceProfile('VPNInstanceProfile',
                            Path='/',
                            Roles=[Ref(vpnrole)]))

    # Custom resource resolving the AMI when none was provided.
    amiid = template.add_resource(
        cfn_custom_classes.AMIId(
            'AMIId',
            Condition='MissingVPNAMI',
            Platform=variables['VPNOS'].ref,
            Region=Ref('AWS::Region'),
            ServiceToken=variables['AMILookupArn'].ref))

    # Lookup subnets from core VPC stack
    subnetlookuplambdarole = template.add_resource(
        iam.Role(
            'SubnetLookupLambdaRole',
            Condition='PrivateSubnetCountOmitted',
            AssumeRolePolicyDocument=iam_policies.assumerolepolicy(
                'lambda'),
            ManagedPolicyArns=[
                IAM_ARN_PREFIX + 'AWSLambdaBasicExecutionRole'
            ],
            Policies=[
                iam.Policy(
                    PolicyName=Join('-', [
                        'subnetlookup-lambda-role',
                        variables['EnvironmentName'].ref,
                        variables['CustomerName'].ref
                    ]),
                    PolicyDocument=Policy(
                        Version='2012-10-17',
                        Statement=[
                            Statement(
                                Action=[
                                    awacs.aws.Action(
                                        'cloudformation',
                                        'DescribeStack*'),
                                    awacs.aws.Action(
                                        'cloudformation', 'Get*')
                                ],
                                Effect=Allow,
                                Resource=[
                                    Join('', [
                                        'arn:aws:cloudformation:',
                                        Ref('AWS::Region'), ':',
                                        Ref('AWS::AccountId'), ':stack/',
                                        variables['CoreVPCStack'].ref, '/*'
                                    ])
                                ])
                        ]))
            ]))

    cfncustomresourcesubnetlookup = template.add_resource(
        awslambda.Function(
            'CFNCustomResourceSubnetLookup',
            Condition='PrivateSubnetCountOmitted',
            Description='Find subnets created by core stack',
            Code=awslambda.Code(
                ZipFile=variables['SubnetLookupLambdaFunction']),
            Handler='index.handler',
            Role=GetAtt(subnetlookuplambdarole, 'Arn'),
            Runtime='python2.7',
            Timeout=10))

    subnetlookup = template.add_resource(
        cfn_custom_classes.SubnetLookup(
            'SubnetLookup',
            Condition='PrivateSubnetCountOmitted',
            CoreVPCStack=variables['CoreVPCStack'].ref,
            Region=Ref('AWS::Region'),
            ServiceToken=GetAtt(cfncustomresourcesubnetlookup, 'Arn')))

    # cloud-config userdata shared by the RHEL and non-RHEL launch
    # configurations: writes the route-update helper, the Chef cookbook
    # sync/run scripts, client.rb, and the Chef environment file.
    # NOTE(review): interior whitespace of these strings appears to have
    # been collapsed by extraction — verify against upstream before
    # relying on the cloud-config indentation.
    common_userdata_prefix = [
        "#cloud-config\n",
        "package_update: true\n",
        "package_upgrade: false\n",
        "write_files:\n",
        " - path: /usr/local/bin/update_vpn_routes.sh\n",
        " permissions: '0755'\n",
        " content: |\n",
        " #!/bin/bash\n",
        " \n",
        " export AWS_DEFAULT_REGION=\"", Ref('AWS::Region'), "\"\n",
        " my_instance_id=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)\n",  # noqa pylint: disable=line-too-long
        " \n",
        " publicroutetableid=", If(
            'PrivateSubnetCountOmitted',
            GetAtt(subnetlookup.title, 'PublicRouteTableId'),
            If(
                'PublicRouteTableSpecified',
                variables['PublicRouteTable'].ref,
                ImportValue(
                    Sub("${%s}-PublicRouteTable" %
                        variables['CoreVPCStack'].name)))),  # noqa pylint: disable=line-too-long
        "\n",
        " private_route_tables=(", If(
            'PrivateSubnetCountOmitted',
            GetAtt(subnetlookup.title, 'PrivateRouteTables'),
            If(
                '3PrivateSubnetsCreated',
                If(
                    'PublicRouteTableSpecified',
                    Join(' ', [
                        variables['PrivateRouteTable1'].ref,
                        variables['PrivateRouteTable2'].ref,
                        variables['PrivateRouteTable3'].ref
                    ]),
                    Join(
                        ' ',
                        [
                            ImportValue(
                                Sub("${%s}-PrivateRouteTable1" %
                                    variables['CoreVPCStack'].name)),  # noqa pylint: disable=line-too-long
                            ImportValue(
                                Sub("${%s}-PrivateRouteTable2" %
                                    variables['CoreVPCStack'].name)),  # noqa pylint: disable=line-too-long
                            ImportValue(
                                Sub("${%s}-PrivateRouteTable3" %
                                    variables['CoreVPCStack'].name))
                        ])),  # noqa pylint: disable=line-too-long
                If(
                    '2PrivateSubnetsCreated',
                    If(
                        'PublicRouteTableSpecified',
                        Join(' ', [
                            variables['PrivateRouteTable1'].ref,
                            variables['PrivateRouteTable2'].ref
                        ]),
                        Join(
                            ' ',
                            [
                                ImportValue(
                                    Sub("${%s}-PrivateRouteTable1" %
                                        variables['CoreVPCStack'].name)),  # noqa pylint: disable=line-too-long
                                ImportValue(
                                    Sub("${%s}-PrivateRouteTable2" %
                                        variables['CoreVPCStack'].name))
                            ])),  # noqa pylint: disable=line-too-long
                    If(
                        'PublicRouteTableSpecified',
                        variables['PrivateRouteTable1'].ref,
                        ImportValue(
                            Sub("${%s}-PrivateRouteTable1" %
                                variables['CoreVPCStack'].name)))))),  # noqa pylint: disable=line-too-long
        ")\n",
        "\n",
        " openvpnroutepubdest=", variables['VPNSubnet'].ref, "\n",
        " \n",
        " # Disabling sourceDestCheck\n",
        " aws ec2 modify-instance-attribute --instance-id ${my_instance_id} --source-dest-check \"{\\\"Value\\\": false}\"\n",  # noqa pylint: disable=line-too-long
        " \n",
        " if aws ec2 describe-route-tables | grep ${openvpnroutepubdest}; then\n",  # noqa pylint: disable=line-too-long
        " # Update 'OpenVPNRoutePub' to point to this instance\n",  # noqa pylint: disable=line-too-long
        " aws ec2 replace-route --route-table-id ${publicroutetableid} --destination-cidr-block ${openvpnroutepubdest} --instance-id ${my_instance_id}\n",  # noqa pylint: disable=line-too-long
        " # Update private routes\n",
        " for i in \"${private_route_tables[@]}\"\n",
        " do\n",
        " aws ec2 replace-route --route-table-id $i --destination-cidr-block ${openvpnroutepubdest} --instance-id ${my_instance_id}\n",  # noqa pylint: disable=line-too-long
        " done\n",
        " else\n",
        " # Create 'OpenVPNRoutePub'\n",
        " aws ec2 create-route --route-table-id ${publicroutetableid} --destination-cidr-block ${openvpnroutepubdest} --instance-id ${my_instance_id}\n",  # noqa pylint: disable=line-too-long
        " # Create private routes\n",
        " for i in \"${private_route_tables[@]}\"\n",
        " do\n",
        " aws ec2 create-route --route-table-id $i --destination-cidr-block ${openvpnroutepubdest} --instance-id ${my_instance_id}\n",  # noqa pylint: disable=line-too-long
        " done\n",
        " fi\n",
        " \n",
        "\n",
        " - path: /etc/chef/sync_cookbooks.sh\n",
        " permissions: '0755'\n",
        " owner: 'root'\n",
        " group: 'root'\n",
        " content: |\n",
        " #!/bin/bash\n",
        " set -e -o pipefail\n",
        " \n",
        " aws --region ", Ref('AWS::Region'), " s3 sync s3://",
        variables['ChefBucketName'].ref, "/",
        variables['EnvironmentName'].ref, "/",
        variables['BucketKey'].ref, "/ /etc/chef/\n",
        " if compgen -G \"/etc/chef/cookbooks-*.tar.gz\" > /dev/null; then\n",  # noqa pylint: disable=line-too-long
        " echo \"Cookbook archive found.\"\n",
        " if [ -d \"/etc/chef/cookbooks\" ]; then\n",
        " echo \"Removing previously extracted cookbooks.\"\n",  # noqa pylint: disable=line-too-long
        " rm -r /etc/chef/cookbooks\n",
        " fi\n",
        " echo \"Extracting highest numbered cookbook archive.\"\n",  # noqa pylint: disable=line-too-long
        " cbarchives=(/etc/chef/cookbooks-*.tar.gz)\n",
        " tar -zxf \"${cbarchives[@]: -1}\" -C /etc/chef\n",
        " chown -R root:root /etc/chef\n",
        " fi\n",
        " \n",
        "\n",
        " - path: /etc/chef/perform_chef_run.sh\n",
        " permissions: '0755'\n",
        " owner: 'root'\n",
        " group: 'root'\n",
        " content: |\n",
        " #!/bin/bash\n",
        " set -e -o pipefail\n",
        " \n",
        " chef-client -z -r '",
        If('ChefRunListSpecified',
           variables['ChefRunList'].ref,
           Join('', ['recipe[', variables['CustomerName'].ref, '_vpn]'])),
        "' -c /etc/chef/client.rb -E ",
        variables['EnvironmentName'].ref,
        " --force-formatter --no-color -F min\n",
        "\n",
        " - path: /etc/chef/client.rb\n",
        " permissions: '0644'\n",
        " owner: 'root'\n",
        " group: 'root'\n",
        " content: |\n",
        " log_level :info\n",
        " log_location '/var/log/chef/client.log'\n",
        " ssl_verify_mode :verify_none\n",
        " cookbook_path '/etc/chef/cookbooks'\n",
        " node_path '/etc/chef/nodes'\n",
        " role_path '/etc/chef/roles'\n",
        " data_bag_path '/etc/chef/data_bags'\n",
        " environment_path '/etc/chef/environments'\n",
        " local_mode 'true'\n",
        "\n",
        " - path: /etc/chef/environments/",
        variables['EnvironmentName'].ref, ".json\n",
        " permissions: '0644'\n",
        " owner: 'root'\n",
        " group: 'root'\n",
        " content: |\n",
        " {\n",
        " \"name\": \"", variables['EnvironmentName'].ref, "\",\n",
        " \"default_attributes\": {\n",
        " \"sturdy\": {\n",
        " \"openvpn\": {\n",
        " \"core_vpc_cidr\": \"", variables['VpcCidr'].ref, "\",\n",
        " \"vpn_elastic_ip\": \"", variables['VpnEipPublicIp'].ref, "\",\n",
        " \"vpn_subnet_cidr\": \"", variables['VPNSubnet'].ref, "\",\n",
        " \"chef_data_bucket_name\": \"",
        variables['ChefDataBucketName'].ref, "\",\n",
        " \"chef_data_bucket_folder\": \"",
        variables['EnvironmentName'].ref, "/",
        variables['BucketKey'].ref, "\",\n",
        " \"chef_data_bucket_region\": \"", Ref('AWS::Region'), "\"\n",
        " }\n",
        " }\n",
        " },\n",
        " \"json_class\": \"Chef::Environment\",\n",
        " \"description\": \"", variables['EnvironmentName'].ref,
        " environment\",\n",
        " \"chef_type\": \"environment\"\n",
        " }\n",
        "\n",
        "runcmd:\n",
        " - set -euf\n",
        " - echo 'Attaching EIP'\n",
        " - pip install aws-ec2-assign-elastic-ip\n",
        # Allowing this command to fail (with ||true) as sturdy_openvpn
        # 2.3.0+ can handle this association instead. This will be removed
        # entirely in the next major release of this module (at which time
        # use of the updated sturdy_openvpn cookbook will be required)
        " - aws-ec2-assign-elastic-ip --region ", Ref('AWS::Region'),
        " --valid-ips ", variables['VpnEipPublicIp'].ref, " || true\n",
        " - echo 'Updating Routes'\n",
        " - /usr/local/bin/update_vpn_routes.sh\n",
        " - echo 'Installing Chef'\n",
        " - curl --max-time 10 --retry-delay 5 --retry 5 -L https://www.chef.io/chef/install.sh | bash -s -- -v ",  # noqa pylint: disable=line-too-long
        variables['ChefClientVersion'].ref, "\n",
        " - echo 'Configuring Chef'\n",
        " - mkdir -p /var/log/chef /etc/chef/data_bags /etc/chef/nodes /etc/chef/roles\n",  # noqa pylint: disable=line-too-long
        " - chmod 0755 /etc/chef\n",
        " - /etc/chef/sync_cookbooks.sh\n",
        " - /etc/chef/perform_chef_run.sh\n"
    ]

    vpnserverlaunchconfig = template.add_resource(
        autoscaling.LaunchConfiguration(
            'VpnServerLaunchConfig',
            AssociatePublicIpAddress=True,
            BlockDeviceMappings=[
                # CentOS AMIs don't include this by default
                ec2.BlockDeviceMapping(
                    DeviceName='/dev/sda1',
                    Ebs=ec2.EBSBlockDevice(DeleteOnTermination=True))
            ],
            IamInstanceProfile=Ref(vpninstanceprofile),
            ImageId=If('MissingVPNAMI',
                       GetAtt(amiid, 'ImageId'),
                       variables['VPNAMI'].ref),
            InstanceType=variables['ManagementInstanceType'].ref,
            InstanceMonitoring=False,  # extra granularity not worth cost
            KeyName=If('SSHKeySpecified',
                       variables['KeyName'].ref,
                       Ref('AWS::NoValue')),
            PlacementTenancy=variables['VpcInstanceTenancy'].ref,
            SecurityGroups=variables['VPNSecurityGroups'].ref,
            # RHEL variants need the EPEL yum repo declared in cloud-config;
            # package names also differ between the two OS families.
            UserData=If(
                'RHELUserData',
                Base64(
                    Join(
                        '',
                        common_userdata_prefix + [
                            "yum_repos:\n",
                            " epel:\n",
                            " name: Extra Packages for $releasever - $basearch\n",  # noqa pylint: disable=line-too-long
                            " baseurl: http://download.fedoraproject.org/pub/epel/7/$basearch\n",  # noqa pylint: disable=line-too-long
                            " enabled: true\n",
                            " failovermethod: priority\n",
                            " gpgcheck: true\n",
                            " gpgkey: https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7\n",  # noqa pylint: disable=line-too-long
                            "packages:\n",
                            " - awscli\n",
                            " - python-pip\n",
                            " - python2-boto\n",
                            " - python2-boto3\n"
                        ])),
                Base64(
                    Join(
                        '',
                        common_userdata_prefix + [
                            "packages:\n",
                            " - awscli\n",
                            " - python-pip\n",
                            " - python-boto\n",
                            " - python-boto3\n"
                        ])))))

    # Single-instance ASG keeps exactly one VPN server running.
    template.add_resource(
        autoscaling.AutoScalingGroup(
            'VPNServerASG',
            MinSize=1,
            MaxSize=1,
            LaunchConfigurationName=Ref(vpnserverlaunchconfig),
            Tags=[
                autoscaling.Tag(
                    'Name',
                    Join('-', [
                        variables['CustomerName'].ref, 'vpn',
                        variables['EnvironmentName'].ref
                    ]), True),
                autoscaling.Tag('environment',
                                variables['EnvironmentName'].ref, True),
                autoscaling.Tag('customer',
                                variables['CustomerName'].ref, True)
            ],
            VPCZoneIdentifier=If(
                'PublicSubnetsOmitted',
                GetAtt(subnetlookup.title, 'PublicSubnetList'),
                variables['PublicSubnets'].ref)))
alarm = t.add_resource( cloudwatch.Alarm( 'Alarm', Condition='AlarmEnabledCondition', ActionsEnabled=Ref(param_notification_enabled), MetricName=Ref(param_metric_name), Namespace=Ref(param_metric_namespace), ComparisonOperator=Ref(param_alarm_comparison_operator), Statistic=Ref(param_alarm_statistic), Threshold=Ref(param_alarm_threshold), EvaluationPeriods=Ref(param_alarm_evaluation_periods), Period=Ref(param_alarm_period), Unit=Ref(param_alarm_unit), AlarmActions=[ If('AlarmTopicCondition', Ref(param_alarm_topic), Ref(AWS_NO_VALUE)) ], OKActions=[ If('OkTopicCondition', Ref(param_ok_topic), Ref(AWS_NO_VALUE)) ], InsufficientDataActions=[ If('InsufficientDataTopicCondition', Ref(param_insufficient_data_topic), Ref(AWS_NO_VALUE)) ], TreatMissingData=Ref(param_treat_missing_data), )) # # Output # t.add_output([
"Principal": { "Service": ["lambda.amazonaws.com"] }, "Action": ["sts:AssumeRole"] }] }, Path="/lambda/", Policies=[lambda_policy]) t.add_resource(lambda_role) lambda_function = t.add_resource( Function( "ChaosLambdaFunction", Description="CloudFormation Lambda", Code=lambda_code, Handler=If(default_on_condition, module_name + ".handler", module_name + ".handler_default_off"), MemorySize=128, Role=GetAtt(lambda_role, "Arn"), Runtime="python2.7", Timeout=30, )) chaos_lambda_rule = t.add_resource( Rule("ChaosLambdaRule", Description="Trigger Chaos Lambda according to a schedule", State="ENABLED", ScheduleExpression=Ref(chaos_schedule), Targets=[ Target(Arn=GetAtt(lambda_function, "Arn"), Id="ChaosLambdaRuleTarget") ]))
Type="String", NoEcho=True, )) conditions = { "IsMultiNodeCluster": Equals(Ref("ClusterType"), "multi-mode"), } for k in conditions: t.add_condition(k, conditions[k]) redshiftcluster = t.add_resource( Cluster( "RedshiftCluster", ClusterType=Ref("ClusterType"), NumberOfNodes=If("IsMultiNodeCluster", Ref("NumberOfNodes"), Ref("AWS::NoValue")), NodeType=Ref("NodeType"), DBName=Ref("DatabaseName"), MasterUsername=Ref("MasterUsername"), MasterUserPassword=Ref("MasterUserPassword"), ClusterParameterGroupName=Ref("RedshiftClusterParameterGroup"), DeletionPolicy="Snapshot", )) amazonredshiftparameter1 = AmazonRedshiftParameter( "AmazonRedshiftParameter1", ParameterName="enable_user_activity_logging", ParameterValue="true", ) redshiftclusterparametergroup = t.add_resource(
def main():
    """Build and print a CloudFormation template for a single EC2 instance.

    The template creates an IAM role and instance profile, an SSH security
    group, the instance itself (AMI looked up per region/OS from the
    AWSRegionAMI mapping), and a wait condition the instance signals from
    its user data.  The rendered template is printed to stdout as
    indented JSON.
    """
    t = Template()
    t.add_version("2010-09-09")
    t.add_description("testing")

    # User data only signals the wait-condition handle so stack creation can
    # complete once the instance has booted.
    # (Removed dead `InstUserData = list()` that was immediately reassigned.)
    InstUserData = [
        '#!/usr/bin/env bash\n',
        '\n',
        'set -x\n',
        '\n',
        'my_wait_handle="', Ref('InstanceWaitHandle'), '"\n',
        'curl -X PUT -H \'Content-Type:\' --data-binary \'{ "Status" : "SUCCESS", "Reason" : "Instance launched", "UniqueId" : "launch001", "Data" : "Instance launched."}\' "${my_wait_handle}"',
        '\n',
        '\n',
    ]

    EC2KeyName = t.add_parameter(Parameter(
        'EC2KeyName',
        Type="AWS::EC2::KeyPair::KeyName",
        Description="Name of an existing EC2 KeyPair to enable SSH access to the instance.",
        # Fixed typo: "valud" -> "valid".
        ConstraintDescription="REQUIRED: Must be a valid EC2 key pair"
    ))

    OperatingSystem = t.add_parameter(Parameter(
        'OperatingSystem',
        Type="String",
        Description="Operating System",
        Default="centos7",
        AllowedValues=[
            "alinux2",
            "centos7",
            "rhel7",
        ],
        ConstraintDescription="Must be: alinux2, centos7, rhel7"
    ))
    # NOTE(review): the AWSRegionAMI mapping below has no "alinux2" entries,
    # so selecting alinux2 would fail FindInMap at deploy time -- confirm and
    # either add alinux2 AMI IDs or drop the allowed value.

    myInstanceType = t.add_parameter(Parameter(
        'MyInstanceType',
        Type="String",
        Description="Instance type",
        Default="r4.16xlarge",
        AllowedValues=[
            "m4.16xlarge", "m4.10xlarge", "r4.16xlarge", "c8.8xlarge"
        ],
        # Fixed grammar: "Must an" -> "Must be an".
        ConstraintDescription="Must be an EC2 instance type from the list"
    ))

    VPCId = t.add_parameter(Parameter(
        'VPCId',
        Type="AWS::EC2::VPC::Id",
        Description="VPC Id for this instance"
    ))

    Subnet = t.add_parameter(Parameter(
        'Subnet',
        Type="AWS::EC2::Subnet::Id",
        Description="Subnet IDs"
    ))

    ExistingSecurityGroup = t.add_parameter(Parameter(
        'ExistingSecurityGroup',
        Type="AWS::EC2::SecurityGroup::Id",
        Description="OPTIONAL: Choose an existing Security Group ID, e.g. sg-abcd1234"
    ))

    UsePublicIp = t.add_parameter(Parameter(
        'UsePublicIp',
        Type="String",
        Description="Should a public IP address be given to the instance",
        Default="true",
        ConstraintDescription="true/false",
        AllowedValues=[
            "true",
            "false"
        ]
    ))

    SshAccessCidr = t.add_parameter(Parameter(
        'SshAccessCidr',
        Type="String",
        Description="CIDR Block for SSH access, default 0.0.0.0/0",
        Default="0.0.0.0/0",
        AllowedPattern="(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})",
        ConstraintDescription="Must be a valid CIDR x.x.x.x/x"
    ))

    # Minimal role the instance can assume; no policies attached here.
    RootRole = t.add_resource(iam.Role(
        "RootRole",
        AssumeRolePolicyDocument={"Statement": [{
            "Effect": "Allow",
            "Principal": {
                "Service": ["ec2.amazonaws.com"]
            },
            "Action": ["sts:AssumeRole"]
        }]}
    ))

    SshSecurityGroup = t.add_resource(SecurityGroup(
        "SshSecurityGroup",
        VpcId=Ref(VPCId),
        # Fixed typo: "Secuirty" -> "Security".
        GroupDescription="SSH Security group",
        SecurityGroupIngress=[
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="22",
                ToPort="22",
                CidrIp=Ref(SshAccessCidr),
            ),
        ]
    ))

    RootInstanceProfile = t.add_resource(InstanceProfile(
        "RootInstanceProfile",
        Roles=[Ref(RootRole)]
    ))

    # NOTE(review): `tags` is built but never attached to any resource --
    # presumably intended as Tags=tags on MyInstance; confirm before wiring up.
    tags = Tags(Name=Ref("AWS::StackName"))

    myInstance = t.add_resource(ec2.Instance(
        'MyInstance',
        ImageId=FindInMap("AWSRegionAMI", Ref("AWS::Region"),
                          Ref(OperatingSystem)),
        KeyName=Ref(EC2KeyName),
        InstanceType=Ref(myInstanceType),
        NetworkInterfaces=[
            NetworkInterfaceProperty(
                # Attach only the stack's SSH group unless the user also
                # supplied an existing security group.
                GroupSet=If(
                    "not_existing_sg",
                    [Ref(SshSecurityGroup)],
                    [Ref(SshSecurityGroup), Ref(ExistingSecurityGroup)]
                ),
                AssociatePublicIpAddress=Ref(UsePublicIp),
                DeviceIndex='0',
                DeleteOnTermination='true',
                SubnetId=Ref(Subnet))],
        IamInstanceProfile=Ref(RootInstanceProfile),
        UserData=Base64(Join('', InstUserData)),
    ))

    t.add_mapping('AWSRegionAMI', {
        "ap-northeast-1": {"centos7": "ami-8e8847f1", "rhel7": "ami-6b0d5f0d"},
        "ap-northeast-2": {"centos7": "ami-bf9c36d1", "rhel7": "ami-3eee4150"},
        "ap-south-1": {"centos7": "ami-1780a878", "rhel7": "ami-5b673c34"},
        "ap-southeast-1": {"centos7": "ami-8e0205f2", "rhel7": "ami-76144b0a"},
        "ap-southeast-2": {"centos7": "ami-d8c21dba", "rhel7": "ami-67589505"},
        "ca-central-1": {"centos7": "ami-e802818c", "rhel7": "ami-49f0762d"},
        "eu-central-1": {"centos7": "ami-dd3c0f36", "rhel7": "ami-c86c3f23"},
        "eu-west-1": {"centos7": "ami-3548444c", "rhel7": "ami-7c491f05"},
        "eu-west-2": {"centos7": "ami-00846a67", "rhel7": "ami-7c1bfd1b"},
        "eu-west-3": {"centos7": "ami-262e9f5b", "rhel7": "ami-5026902d"},
        "sa-east-1": {"centos7": "ami-cb5803a7", "rhel7": "ami-b0b7e3dc"},
        "us-east-1": {"centos7": "ami-9887c6e7", "rhel7": "ami-6871a115"},
        "us-east-2": {"centos7": "ami-9c0638f9", "rhel7": "ami-03291866"},
        "us-west-1": {"centos7": "ami-4826c22b", "rhel7": "ami-18726478"},
        "us-west-2": {"centos7": "ami-3ecc8f46", "rhel7": "ami-28e07e50"}
    })

    # True when no existing security group was supplied.
    t.add_condition(
        "not_existing_sg",
        Equals(Ref(ExistingSecurityGroup), "")
    )

    t.add_condition(
        "Has_Public_Ip",
        Equals(Ref(UsePublicIp), "true")
    )

    mywaithandle = t.add_resource(WaitConditionHandle('InstanceWaitHandle'))

    # 25-minute timeout for the instance to signal the handle from user data.
    mywaitcondition = t.add_resource(WaitCondition(
        "InstanceWaitCondition",
        Handle=Ref(mywaithandle),
        Timeout="1500",
        DependsOn="MyInstance"
    ))

    t.add_output([
        Output(
            "InstanceID",
            Description="Instance ID",
            Value=Ref(myInstance)
        )
    ])
    t.add_output([
        Output(
            "InstancePrivateIP",
            Value=GetAtt('MyInstance', 'PrivateIp')
        )
    ])
    t.add_output([
        Output(
            "InstancePublicIP",
            Value=GetAtt('MyInstance', 'PublicIp'),
            Condition="Has_Public_Ip"
        )
    ])

    print(t.to_json(indent=2))
def main():
    """Build and print a CloudFormation template for a ZFS-backed NFS server.

    The template creates an IAM role/instance profile (with optional S3 read
    access), NFS and SSH security groups, an optional Elastic IP, and an EC2
    instance whose user data (the parameter preamble below plus
    _include/Tropo_build_zfs_export_nfs.sh) installs ZFS, builds the pool
    from the attached EBS volumes, and exports it over NFS.  The rendered
    template is printed to stdout as indented JSON.
    """
    t = Template()
    t.add_version("2010-09-09")
    t.add_description(
        "Currently supporting RHEL/CentOS 7.5. Setup IAM role and security groups, "
        "launch instance, create/attach 10 EBS volumes, install/fix ZFS "
        "(http://download.zfsonlinux.org/epel/zfs-release.el7_5.noarch.rpm), "
        "create zfs RAID6 pool, setup NFS server, export NFS share")

    # Shell preamble exposing stack parameters as variables to the user-data
    # build script appended below.
    # (Removed dead `InstUserData = list()` that was immediately reassigned.)
    InstUserData = [
        '#!/usr/bin/env bash\n',
        '\n',
        'set -x\n',
        '\n',
        '##exit 0\n',  # use this to disable all user-data and bring up files
        '\n',
        'zfs_pool_name="', Ref('ZfsPool'), '"\n',
        'zfs_mount_point="', Ref('ZfsMountPoint'), '"\n',
        'nfs_cidr_block="', Ref('NFSCidr'), '"\n',
        'nfs_opts="', Ref('NFSOpts'), '"\n',
        'my_wait_handle="', Ref('NFSInstanceWaitHandle'), '"\n',
        '\n',
    ]

    # Append the build script verbatim after the parameter preamble.
    with open('_include/Tropo_build_zfs_export_nfs.sh', 'r') as ud_file:
        InstUserData.extend(ud_file.readlines())

    # Console UI grouping/labels for the parameters defined below.
    t.add_metadata({
        'AWS::CloudFormation::Interface': {
            'ParameterGroups': [{
                'Label': {'default': 'Instance Configuration'},
                'Parameters': [
                    "OperatingSystem", "VPCId", "Subnet", "UsePublicIp",
                    "CreateElasticIP", "EC2KeyName", "NFSInstanceType",
                    "SshAccessCidr", "ExistingSecurityGroup",
                    "ExistingPlacementGroup", "S3BucketName"
                ]
            }, {
                'Label': {'default': 'Storage Options - Required'},
                'Parameters': [
                    "RAIDLevel", "VolumeSize", "VolumeType", "EBSVolumeType",
                    "VolumeIops"
                ]
            }, {
                'Label': {'default': 'ZFS Pool and FS Options - Required'},
                'Parameters': ["ZfsPool", "ZfsMountPoint"]
            }, {
                'Label': {'default': 'NFS Options - Required'},
                'Parameters': ["NFSCidr", "NFSOpts"]
            }],
            'ParameterLabels': {
                'OperatingSystem': {'default': 'Operating System of AMI'},
                'VPCId': {'default': 'VPC ID'},
                'Subnet': {'default': 'Subnet ID'},
                'UsePublicIp': {'default': 'Assign a Public IP '},
                'CreateElasticIP': {'default': 'Create and use an EIP '},
                'EC2KeyName': {'default': 'EC2 Key Name'},
                'NFSInstanceType': {'default': 'Instance Type'},
                'SshAccessCidr': {'default': 'SSH Access CIDR Block'},
                'ExistingSecurityGroup': {
                    'default': 'OPTIONAL: Existing Security Group'
                },
                'ExistingPlacementGroup': {
                    'default': 'OPTIONAL: Existing Placement Group'
                },
                'S3BucketName': {'default': 'Optional S3 Bucket Name'},
                'RAIDLevel': {'default': 'RAID Level'},
                'VolumeSize': {'default': 'Volume size of the EBS vol'},
                'VolumeType': {'default': 'Volume type of the EBS vol'},
                'EBSVolumeType': {'default': 'Volume type of the EBS vol'},
                'VolumeIops': {
                    'default': 'IOPS for each EBS vol (only for io1)'
                },
                'ZfsPool': {'default': 'ZFS pool name'},
                'ZfsMountPoint': {'default': 'Mount Point'},
                'NFSCidr': {'default': 'NFS CIDR block for mounts'},
                'NFSOpts': {'default': 'NFS options'},
            }
        }
    })

    EC2KeyName = t.add_parameter(
        Parameter(
            'EC2KeyName',
            Type="AWS::EC2::KeyPair::KeyName",
            Description="Name of an existing EC2 KeyPair to enable SSH access to the instance.",
            # Fixed typo: "valud" -> "valid".
            ConstraintDescription="REQUIRED: Must be a valid EC2 key pair"))

    OperatingSystem = t.add_parameter(
        Parameter('OperatingSystem',
                  Type="String",
                  Description="Operating System",
                  Default="centos7",
                  AllowedValues=[
                      "alinux2",
                      "centos7",
                      "rhel7",
                  ],
                  ConstraintDescription="Must be: alinux2, centos7, rhel7"))
    # NOTE(review): the AWSRegionAMI mapping below has no "alinux2" entries,
    # so selecting alinux2 would fail FindInMap at deploy time -- confirm.

    NFSInstanceType = t.add_parameter(
        Parameter(
            'NFSInstanceType',
            Type="String",
            Description="NFS instance type",
            Default="r4.16xlarge",
            AllowedValues=[
                "m4.16xlarge", "m4.10xlarge", "r4.16xlarge", "c8.8xlarge"
            ],
            # Fixed grammar: "Must an" -> "Must be an".
            ConstraintDescription="Must be an EC2 instance type from the list"))

    VolumeType = t.add_parameter(
        Parameter(
            'VolumeType',
            Type="String",
            Description="Type of EBS volume",
            Default="EBS",
            AllowedValues=["EBS", "InstanceStore"],
            # Fixed grammar: "has to EBS" -> "has to be EBS".
            ConstraintDescription="Volume type has to be EBS or InstanceStore"))

    EBSVolumeType = t.add_parameter(
        Parameter('EBSVolumeType',
                  Description="Type of EBS volumes to create",
                  Type="String",
                  Default="io1",
                  # Fixed grammar: "a either" -> "either".
                  ConstraintDescription="Must be either: io1, gp2, st1",
                  AllowedValues=["io1", "gp2", "st1"]))

    # Local renamed from misspelled `VolumelSize`; the logical name is
    # unchanged so the template output is identical.
    VolumeSize = t.add_parameter(
        Parameter('VolumeSize',
                  Type="Number",
                  Default="500",
                  Description="Volume size in GB"))

    VolumeIops = t.add_parameter(
        Parameter('VolumeIops',
                  Type="Number",
                  Default="20000",
                  Description="IOPS for the EBS volume"))

    RAIDLevel = t.add_parameter(
        Parameter(
            'RAIDLevel',
            # Description updated to agree with AllowedValues/constraint;
            # the old text claimed only RAID 6 was supported.
            Description="RAID Level, currently only 0 is supported",
            Type="String",
            Default="0",
            AllowedValues=["0"],
            ConstraintDescription="Must be 0"))

    ZfsPool = t.add_parameter(
        Parameter('ZfsPool',
                  Description="ZFS pool name",
                  Type="String",
                  Default="v01"))

    ZfsMountPoint = t.add_parameter(
        Parameter(
            'ZfsMountPoint',
            Description=
            "ZFS mount point, absolute path will be /pool_name/mount_point (e.g. /v01/testzfs)",
            Type="String",
            Default="testzfs"))

    VPCId = t.add_parameter(
        Parameter('VPCId',
                  Type="AWS::EC2::VPC::Id",
                  Description="VPC Id for this instance"))

    ExistingPlacementGroup = t.add_parameter(
        Parameter('ExistingPlacementGroup',
                  Type="String",
                  Description="OPTIONAL: Existing placement group"))

    Subnet = t.add_parameter(
        Parameter('Subnet',
                  Type="AWS::EC2::Subnet::Id",
                  Description="Subnet IDs"))

    ExistingSecurityGroup = t.add_parameter(
        Parameter(
            'ExistingSecurityGroup',
            Type="AWS::EC2::SecurityGroup::Id",
            Description=
            "OPTIONAL: Choose an existing Security Group ID, e.g. sg-abcd1234"))

    UsePublicIp = t.add_parameter(
        Parameter(
            'UsePublicIp',
            Type="String",
            Description="Should a public IP address be given to the instance",
            Default="true",
            # Fixed typo: "true/talse" -> "true/false".
            ConstraintDescription="true/false",
            AllowedValues=["true", "false"]))

    CreateElasticIP = t.add_parameter(
        Parameter(
            'CreateElasticIP',
            Type="String",
            # Fixed typos: "Elasic" -> "Elastic", "assinged" -> "assigned".
            Description=
            "Create an Elastic IP address, that will be assigned to an instance",
            Default="true",
            ConstraintDescription="true/false",
            AllowedValues=["true", "false"]))

    S3BucketName = t.add_parameter(
        Parameter('S3BucketName',
                  Type="String",
                  Description="S3 bucket to allow this instance read access."))

    SshAccessCidr = t.add_parameter(
        Parameter(
            'SshAccessCidr',
            Type="String",
            Description="CIDR Block for SSH access, default 0.0.0.0/0",
            Default="0.0.0.0/0",
            AllowedPattern=
            "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})",
            ConstraintDescription="Must be a valid CIDR x.x.x.x/x"))

    NFSCidr = t.add_parameter(
        Parameter(
            'NFSCidr',
            Type="String",
            Description=
            "CIDR for NFS Security Group and NFS clients, to allow all access use 0.0.0.0/0",
            Default="10.0.0.0/16",
            AllowedPattern=
            "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})",
            ConstraintDescription="Must be a valid CIDR x.x.x.x/x"))

    NFSOpts = t.add_parameter(
        Parameter(
            'NFSOpts',
            Description="NFS export options",
            Type="String",
            Default="(rw,async,no_root_squash,wdelay,no_subtree_check,no_acl)"))

    # NOTE(review): this parameter is declared but not referenced by any
    # resource in this function -- confirm it is consumed by the user-data
    # script or remove it.
    VarLogMessagesFile = t.add_parameter(
        Parameter(
            'VarLogMessagesFile',
            Type="String",
            Description=
            "S3 bucket and file name for log CloudWatch config (e.g. s3://jouser-logs/var-log-message.config)"))

    # Instance role: read-only access (GetObject + ListBucket) to the
    # optional S3 bucket parameter.
    RootRole = t.add_resource(
        iam.Role("RootRole",
                 AssumeRolePolicyDocument={
                     "Statement": [{
                         "Effect": "Allow",
                         "Principal": {
                             "Service": ["ec2.amazonaws.com"]
                         },
                         "Action": ["sts:AssumeRole"]
                     }]
                 },
                 Policies=[
                     iam.Policy(PolicyName="s3bucketaccess",
                                PolicyDocument={
                                    "Statement": [{
                                        "Effect": "Allow",
                                        "Action": ["s3:GetObject"],
                                        "Resource": {
                                            "Fn::Join": [
                                                "",
                                                [
                                                    "arn:aws:s3:::", {
                                                        "Ref": "S3BucketName"
                                                    }, "/*"
                                                ]
                                            ]
                                        }
                                    }, {
                                        "Effect": "Allow",
                                        "Action": ["s3:ListBucket"],
                                        "Resource": {
                                            "Fn::Join": [
                                                "",
                                                [
                                                    "arn:aws:s3:::", {
                                                        "Ref": "S3BucketName"
                                                    }
                                                ]
                                            ]
                                        }
                                    }],
                                }),
                 ]))

    NFSSecurityGroup = t.add_resource(
        SecurityGroup("NFSSecurityGroup",
                      VpcId=Ref(VPCId),
                      # Fixed typo: "Secuirty" -> "Security".
                      GroupDescription="NFS Security group",
                      SecurityGroupIngress=[
                          ec2.SecurityGroupRule(
                              IpProtocol="tcp",
                              FromPort="2049",
                              ToPort="2049",
                              CidrIp=Ref(NFSCidr),
                          ),
                      ]))

    SshSecurityGroup = t.add_resource(
        SecurityGroup("SshSecurityGroup",
                      VpcId=Ref(VPCId),
                      # Fixed typo: "Secuirty" -> "Security".
                      GroupDescription="SSH Security group",
                      SecurityGroupIngress=[
                          ec2.SecurityGroupRule(
                              IpProtocol="tcp",
                              FromPort="22",
                              ToPort="22",
                              CidrIp=Ref(SshAccessCidr),
                          ),
                      ]))

    RootInstanceProfile = t.add_resource(
        InstanceProfile("RootInstanceProfile", Roles=[Ref(RootRole)]))

    EIPAddress = t.add_resource(
        EIP('EIPAddress', Domain='vpc', Condition="create_elastic_ip"))

    # NOTE(review): `tags` is built but never attached to any resource --
    # presumably intended for NFSInstance; confirm before wiring up.
    tags = Tags(Name=Ref("AWS::StackName"))

    # One identical EBS data volume per device letter (was six copy-pasted
    # BlockDeviceMapping literals).  Only attached when the stack selects
    # EBS-backed storage (vol_type_ebs condition).
    # NOTE(review): the stack description promises 10 volumes but only six
    # devices (sdh-sdm) are mapped -- confirm the intended count.
    ebs_data_volumes = [
        ec2.BlockDeviceMapping(
            DeviceName="/dev/sd" + letter,
            Ebs=ec2.EBSBlockDevice(VolumeSize=Ref(VolumeSize),
                                   DeleteOnTermination="True",
                                   Iops=Ref(VolumeIops),
                                   VolumeType=Ref(EBSVolumeType)))
        for letter in "hijklm"
    ]

    NFSInstance = t.add_resource(
        ec2.Instance(
            'NFSInstance',
            ImageId=FindInMap("AWSRegionAMI", Ref("AWS::Region"),
                              Ref(OperatingSystem)),
            KeyName=Ref(EC2KeyName),
            InstanceType=Ref(NFSInstanceType),
            NetworkInterfaces=[
                NetworkInterfaceProperty(
                    # Attach the stack's NFS+SSH groups, plus the existing
                    # group when one was supplied.
                    GroupSet=If("not_existing_sg",
                                [Ref(NFSSecurityGroup),
                                 Ref(SshSecurityGroup)], [
                                     Ref(NFSSecurityGroup),
                                     Ref(SshSecurityGroup),
                                     Ref(ExistingSecurityGroup)
                                 ]),
                    AssociatePublicIpAddress=Ref(UsePublicIp),
                    DeviceIndex='0',
                    DeleteOnTermination='true',
                    SubnetId=Ref(Subnet))
            ],
            IamInstanceProfile=Ref(RootInstanceProfile),
            PlacementGroupName=Ref(ExistingPlacementGroup),
            BlockDeviceMappings=If(
                'vol_type_ebs',
                ebs_data_volumes,
                {"Ref": "AWS::NoValue"},
            ),
            UserData=Base64(Join('', InstUserData)),
        ))  # End of NFSInstance

    t.add_mapping(
        'AWSRegionAMI', {
            "ap-northeast-1": {
                "centos7": "ami-8e8847f1",
                "rhel7": "ami-6b0d5f0d"
            },
            "ap-northeast-2": {
                "centos7": "ami-bf9c36d1",
                "rhel7": "ami-3eee4150"
            },
            "ap-south-1": {
                "centos7": "ami-1780a878",
                "rhel7": "ami-5b673c34"
            },
            "ap-southeast-1": {
                "centos7": "ami-8e0205f2",
                "rhel7": "ami-76144b0a"
            },
            "ap-southeast-2": {
                "centos7": "ami-d8c21dba",
                "rhel7": "ami-67589505"
            },
            "ca-central-1": {
                "centos7": "ami-e802818c",
                "rhel7": "ami-49f0762d"
            },
            "eu-central-1": {
                "centos7": "ami-dd3c0f36",
                "rhel7": "ami-c86c3f23"
            },
            "eu-west-1": {
                "centos7": "ami-3548444c",
                "rhel7": "ami-7c491f05"
            },
            "eu-west-2": {
                "centos7": "ami-00846a67",
                "rhel7": "ami-7c1bfd1b"
            },
            "eu-west-3": {
                "centos7": "ami-262e9f5b",
                "rhel7": "ami-5026902d"
            },
            "sa-east-1": {
                "centos7": "ami-cb5803a7",
                "rhel7": "ami-b0b7e3dc"
            },
            "us-east-1": {
                "centos7": "ami-9887c6e7",
                "rhel7": "ami-6871a115"
            },
            "us-east-2": {
                "centos7": "ami-9c0638f9",
                "rhel7": "ami-03291866"
            },
            "us-west-1": {
                "centos7": "ami-4826c22b",
                "rhel7": "ami-18726478"
            },
            "us-west-2": {
                "centos7": "ami-3ecc8f46",
                "rhel7": "ami-28e07e50"
            }
        })

    t.add_condition("not_existing_sg", Equals(Ref(ExistingSecurityGroup), ""))
    t.add_condition("vol_type_ebs", Equals(Ref(VolumeType), "EBS"))
    # BUG FIX: these two conditions compared against "True" (capitalized),
    # but the parameters only allow lowercase "true"/"false", so the
    # conditions could never evaluate to true (no public-IP output and no
    # EIP would ever be created).  Compare against "true" instead.
    t.add_condition("Has_Public_Ip", Equals(Ref(UsePublicIp), "true"))
    t.add_condition("Has_Bucket", Not(Equals(Ref(S3BucketName), "")))
    t.add_condition("create_elastic_ip", Equals(Ref(CreateElasticIP), "true"))

    nfswaithandle = t.add_resource(
        WaitConditionHandle('NFSInstanceWaitHandle'))

    # 25-minute timeout for the instance to signal the handle from user data.
    nfswaitcondition = t.add_resource(
        WaitCondition("NFSInstanceWaitCondition",
                      Handle=Ref(nfswaithandle),
                      Timeout="1500",
                      DependsOn="NFSInstance"))

    t.add_output([
        Output("ElasticIP",
               Description="Elastic IP address for the instance",
               Value=Ref(EIPAddress),
               Condition="create_elastic_ip")
    ])
    t.add_output([
        Output("InstanceID", Description="Instance ID", Value=Ref(NFSInstance))
    ])
    t.add_output([
        Output("InstancePrivateIP", Value=GetAtt('NFSInstance', 'PrivateIp'))
    ])
    t.add_output([
        Output("InstancePublicIP",
               Value=GetAtt('NFSInstance', 'PublicIp'),
               Condition="Has_Public_Ip")
    ])
    t.add_output([
        Output("ElasticPublicIP",
               Value=GetAtt('NFSInstance', 'PublicIp'),
               Condition="create_elastic_ip")
    ])
    # NOTE(review): ":/fs1" disagrees with the pool/mount-point parameters
    # used by ExampleClientMountCommands below -- confirm intended export.
    t.add_output([
        Output("PrivateMountPoint",
               Description="Mount point on private network",
               Value=Join("", [GetAtt('NFSInstance', 'PrivateIp'), ":/fs1"]))
    ])
    t.add_output([
        Output("ExampleClientMountCommands",
               Description="Example commands to mount NFS on the clients",
               Value=Join("", [
                   "sudo mkdir /nfs1; sudo mount ",
                   GetAtt('NFSInstance', 'PrivateIp'), ":/", Ref("ZfsPool"),
                   "/", Ref("ZfsMountPoint"), " /nfs1"
               ]))
    ])
    t.add_output([
        Output("S3BucketName",
               Value=Ref("S3BucketName"),
               Condition="Has_Bucket")
    ])

    print(t.to_json(indent=2))
MinLength="1", MaxLength="1", ConstraintDescription="S for string data, N for numeric data, or B for " "binary data", ) ) myDynamoDB = template.add_resource( Table( "myDynamoDBTable", AttributeDefinitions=[ AttributeDefinition( AttributeName=Ref(hashkeyname), AttributeType=Ref(hashkeytype) ), ], BillingMode=If("OnDemand", "PAY_PER_REQUEST", "PROVISIONED"), ProvisionedThroughput=If( "OnDemand", NoValue, ProvisionedThroughput( ReadCapacityUnits=Ref(readunits), WriteCapacityUnits=Ref(writeunits) ), ), KeySchema=[KeySchema(AttributeName=Ref(hashkeyname), KeyType="HASH")], ) ) GSITable = template.add_resource( Table( "GSITable", AttributeDefinitions=[
Listeners=[{ "InstancePort": "80", "PolicyNames": ["CookieBasedPolicy"], "LoadBalancerPort": "80", "Protocol": "HTTP" }], AvailabilityZones=GetAZs(""), )) # @alias component @db:@db to MySQLDatabase MySQLDatabase = t.add_resource( DBInstance( "MySQLDatabase", Engine="MySQL", MultiAZ=Ref(MultiAZDatabase), DBSecurityGroups=If("Is-EC2-Classic", [Ref(DBSecurityGroup)], Ref("AWS::NoValue")), MasterUsername=Ref(DBUser), MasterUserPassword=Ref(DBPassword), VPCSecurityGroups=If("Is-EC2-VPC", [GetAtt(DBEC2SecurityGroup, "GroupId")], Ref("AWS::NoValue")), AllocatedStorage=Ref(DBAllocatedStorage), DBInstanceClass=Ref(DBInstanceClass), DBName=Ref(DBName), )) WebsiteURL = t.add_output( Output( "WebsiteURL", Description="URL for newly created LAMP stack", Value=Join(
instance_role = t.add_resource( iam.Role( 'InstanceRole', Condition='CreateInstanceProfileCondition', AssumeRolePolicyDocument=Policy(Statement=[ Statement(Effect=Allow, Action=[awacs.sts.AssumeRole], Principal=Principal('Service', [ Sub('ec2.${AWS::URLSuffix}'), ])) ]), ManagedPolicyArns=[ # XXX: This is waaaaay too open If( 'SSMEnabledCondition', Sub('arn:${AWS::Partition}:iam::aws:policy/service-role/AmazonEC2RoleforSSM' ), Ref(AWS_NO_VALUE)) ], Policies=[])) instance_profile = t.add_resource( iam.InstanceProfile( 'InstanceProfile', Condition='CreateInstanceProfileCondition', Roles=[Ref(instance_role)], )) instance_resource_name = 'AWSLinuxInstance' instance_user_data = Base64( Sub('''#!/bin/bash -xe
def create_template(self):
    """Create template (main function called by Stacker).

    Builds the resources for a Terraform S3 backend: a DynamoDB lock
    table, a versioned private S3 state bucket, and a managed IAM policy
    granting the permissions Terraform needs, exporting the table/bucket
    names, the bucket ARN, and the policy ARN as stack outputs.
    """
    template = self.template
    variables = self.get_variables()
    self.template.add_version('2010-09-09')
    self.template.add_description('Terraform State Resources')

    # Conditions
    # A name variable counts as "omitted" when it is empty or the literal
    # string 'undefined'; the resource name is then left for CloudFormation
    # to generate.
    for i in ['BucketName', 'TableName']:
        template.add_condition(
            "%sOmitted" % i,
            Or(Equals(variables[i].ref, ''),
               Equals(variables[i].ref, 'undefined')))

    # Resources
    # DynamoDB table used by the Terraform S3 backend for state locking;
    # the backend requires a string 'LockID' hash key.
    terraformlocktable = template.add_resource(
        dynamodb.Table(
            'TerraformStateTable',
            AttributeDefinitions=[
                dynamodb.AttributeDefinition(AttributeName='LockID',
                                             AttributeType='S')
            ],
            KeySchema=[
                dynamodb.KeySchema(AttributeName='LockID', KeyType='HASH')
            ],
            ProvisionedThroughput=dynamodb.ProvisionedThroughput(
                ReadCapacityUnits=2, WriteCapacityUnits=2),
            TableName=If('TableNameOmitted', NoValue,
                         variables['TableName'].ref)))
    template.add_output(
        Output('%sName' % terraformlocktable.title,
               Description='Name of DynamoDB table for Terraform state',
               Value=terraformlocktable.ref()))

    # Versioned, private bucket holding Terraform state; noncurrent state
    # versions expire after 90 days.
    terraformstatebucket = template.add_resource(
        s3.Bucket(
            'TerraformStateBucket',
            AccessControl=s3.Private,
            BucketName=If('BucketNameOmitted', NoValue,
                          variables['BucketName'].ref),
            LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
                s3.LifecycleRule(NoncurrentVersionExpirationInDays=90,
                                 Status='Enabled')
            ]),
            VersioningConfiguration=s3.VersioningConfiguration(
                Status='Enabled')))
    template.add_output(
        Output('%sName' % terraformstatebucket.title,
               Description='Name of bucket storing Terraform state',
               Value=terraformstatebucket.ref()))
    template.add_output(
        Output('%sArn' % terraformstatebucket.title,
               Description='Arn of bucket storing Terraform state',
               Value=terraformstatebucket.get_att('Arn')))

    # Managed policy mirroring the permission set documented for the
    # Terraform S3 backend: list/get/put on the state bucket and item-level
    # access on the lock table.
    managementpolicy = template.add_resource(
        iam.ManagedPolicy(
            'ManagementPolicy',
            Description='Managed policy for Terraform state management.',
            Path='/',
            PolicyDocument=PolicyDocument(
                Version='2012-10-17',
                Statement=[
                    # https://www.terraform.io/docs/backends/types/s3.html#s3-bucket-permissions
                    Statement(
                        Action=[awacs.s3.ListBucket],
                        Effect=Allow,
                        Resource=[terraformstatebucket.get_att('Arn')]),
                    Statement(
                        Action=[awacs.s3.GetObject, awacs.s3.PutObject],
                        Effect=Allow,
                        Resource=[
                            Join('', [
                                terraformstatebucket.get_att('Arn'), '/*'
                            ])
                        ]),
                    Statement(Action=[
                        awacs.dynamodb.GetItem, awacs.dynamodb.PutItem,
                        awacs.dynamodb.DeleteItem
                    ],
                              Effect=Allow,
                              Resource=[terraformlocktable.get_att('Arn')])
                ])))
    template.add_output(
        Output('PolicyArn',
               Description='Managed policy Arn',
               Value=managementpolicy.ref()))
}) ])) BUCKET = Bucket( 'ReplicatedBucket', DependsOn=[KMS_KEY, KMS_ALIAS], BucketName=Sub('${BucketName}-${AWS::Region}'), VersioningConfiguration=VersioningConfiguration(Status='Enabled'), LifecycleConfiguration=LifecycleConfiguration(Rules=[ LifecycleRule( Status='Enabled', AbortIncompleteMultipartUpload=AbortIncompleteMultipartUpload( DaysAfterInitiation=3), NoncurrentVersionExpirationInDays=1, Transition=LifecycleRuleTransition(StorageClass='GLACIER', TransitionInDays=If( SOURCE_REGION_CON, 31, 14))) ]), BucketEncryption=BucketEncryption(ServerSideEncryptionConfiguration=[ ServerSideEncryptionRule( ServerSideEncryptionByDefault=ServerSideEncryptionByDefault( SSEAlgorithm='aws:kms', KMSMasterKeyID=Ref(KMS_KEY))) ]), ReplicationConfiguration=If( SOURCE_REGION_CON, ReplicationConfiguration( Role=GetAtt(IAM_ROLE, 'Arn'), Rules=[ ReplicationConfigurationRules( Destination=ReplicationConfigurationRulesDestination( Bucket=Sub( f"arn:${{AWS::Partition}}:s3:::${{{BUCKET_NAME.title}}}-${{{REPLICA_REGION.title}}}"
Description="Maximum number of nodes in the cluster.", )) t.add_condition("UseDatabaseEncryption", Equals(Ref(DBStorageEncrypted), True)) t.add_condition("GovCloudCondition", Equals(Ref("AWS::Region"), "us-gov-west-1")) JiraDCStack = t.add_resource( Stack( "JiraDCStack", TemplateURL={ "Fn::Sub": [ "https://${QSS3BucketName}.${QSS3Region}.amazonaws.com/${QSS3KeyPrefix}templates/quickstart-jira-dc.template.yaml", { "QSS3Region": If("GovCloudCondition", "s3-us-gov-west-1", "s3") } ] }, Parameters={ "DBPoolMaxSize": Ref(DBPoolMaxSize), "ClusterNodeInstanceType": Ref(ClusterNodeInstanceType), "TomcatEnableLookups": Ref(TomcatEnableLookups), "ClusterNodeMin": Ref(ClusterNodeMin), "DBTimeBetweenEvictionRunsMillis": Ref(DBTimeBetweenEvictionRunsMillis), "DBPassword": Ref(DBPassword), "DBMaxWaitMillis": Ref(DBMaxWaitMillis), "ClusterNodeVolumeSize": Ref(ClusterNodeVolumeSize), "KeyPairName": Ref(KeyPairName), "DBTestWhileIdle": Ref(DBTestWhileIdle),
def test_allow_string_cluster(self):
    """Exercise emr.Cluster and friends with string-typed values.

    Behaviourally identical to the original test: builds a security
    configuration, a full cluster definition (validated via ``to_dict``),
    a standalone auto-scaling policy, and a task instance group.
    """
    sec_config = emr.SecurityConfiguration(
        "emrsecurityconfiguration",
        Name="EMRSecurityConfiguration",
        SecurityConfiguration=security_configuration,
    )

    # String placeholders used for the spot-price condition/parameter refs.
    spot_param = "2"
    spot_condition = "WithSpotPrice"

    apps = [
        emr.Application(Name=app_name)
        for app_name in ("Hadoop", "Hive", "Mahout", "Pig", "Spark")
    ]

    bootstrap = emr.BootstrapActionConfig(
        Name="Dummy bootstrap action",
        ScriptBootstrapAction=emr.ScriptBootstrapActionConfig(
            Path="file:/usr/share/aws/emr/scripts/install-hue",
            Args=["dummy", "parameter"],
        ),
    )

    core_site = emr.Configuration(
        Classification="core-site",
        ConfigurationProperties={
            "hadoop.security.groups.cache.secs": "250"
        },
    )

    master_group = emr.InstanceGroupConfigProperty(
        InstanceCount="1",
        InstanceType=M4_LARGE,
        AutoScalingPolicy=emr.AutoScalingPolicy(
            Constraints=emr.ScalingConstraints(MinCapacity="1",
                                               MaxCapacity="3"),
            Rules=self.generate_rules("MasterAutoScalingPolicy"),
        ),
    )

    core_group = emr.InstanceGroupConfigProperty(
        Name="Core Instance",
        BidPrice=If(spot_condition, Ref(spot_param), Ref("AWS::NoValue")),
        Market=If(spot_condition, "SPOT", "ON_DEMAND"),
        InstanceCount="1",
        InstanceType=M4_LARGE,
        AutoScalingPolicy=emr.AutoScalingPolicy(
            Constraints=emr.ScalingConstraints(MinCapacity="1",
                                               MaxCapacity="3"),
            Rules=self.generate_rules("CoreAutoScalingPolicy"),
        ),
    )

    cluster = emr.Cluster(
        "Cluster",
        Applications=apps,
        BootstrapActions=[bootstrap],
        Configurations=[core_site],
        Instances=emr.JobFlowInstancesConfig(
            Ec2KeyName="KeyName",
            Ec2SubnetId="SubnetId",
            MasterInstanceGroup=master_group,
            CoreInstanceGroup=core_group,
        ),
        JobFlowRole="EMRJobFlowRole",
        LogUri="s3://cluster-logs",
        Name="EMR Cluster",
        ReleaseLabel="emr-5.5.0",
        SecurityConfiguration=Ref(sec_config),
        ServiceRole="EMRServiceRole",
        AutoScalingRole="EMR_AutoScaling_DefaultRole",
        VisibleToAllUsers="true",
        Tags=Tags(Name="EMR Sample Cluster"),
    )
    # Serialization validates the whole resource tree.
    cluster.to_dict()

    def _rule(name, description, metric, operator, threshold, adjustment):
        """One ScalingRule with the shared 300s cooldown/period shape."""
        return emr.ScalingRule(
            Name=name,
            Description=description,
            Action=emr.ScalingAction(
                SimpleScalingPolicyConfiguration=(
                    emr.SimpleScalingPolicyConfiguration(
                        AdjustmentType="CHANGE_IN_CAPACITY",
                        CoolDown=300,
                        ScalingAdjustment=adjustment,
                    )
                )
            ),
            Trigger=emr.ScalingTrigger(
                CloudWatchAlarmDefinition=emr.CloudWatchAlarmDefinition(
                    ComparisonOperator=operator,
                    MetricName=metric,
                    Period=300,
                    Threshold=threshold,
                    Dimensions=[
                        emr.MetricDimension(Key="JobFlowId",
                                            Value="${emr.clusterId}")
                    ],
                )
            ),
        )

    autoscale_policy = emr.AutoScalingPolicy(
        Constraints=emr.ScalingConstraints(MinCapacity=0, MaxCapacity=5),
        Rules=[
            _rule("ScaleUpContainerPending",
                  "Scale up on over-provisioned containers",
                  "ContainerPendingRatio", "GREATER_THAN", 0.75, 1),
            _rule("ScaleUpMemory",
                  "Scale up on low memory",
                  "YARNMemoryAvailablePercentage", "LESS_THAN", 15, 1),
            _rule("ScaleDownMemory",
                  "Scale down on high memory",
                  "YARNMemoryAvailablePercentage", "GREATER_THAN", 75, -1),
        ],
    )

    emr.InstanceGroupConfig(
        "TaskInstanceGroup",
        AutoScalingPolicy=autoscale_policy,
        InstanceCount=0,
        InstanceType=M4_LARGE,
        InstanceRole="TASK",
        Market="ON_DEMAND",
        Name="Task Instance",
        JobFlowId=Ref(cluster),
    )
return exports + bootstrap_script_body.splitlines(True) AmbariNode = t.add_resource( ec2.Instance( "AmbariNode", UserData=Base64( Join( "", my_bootstrap_script('AmbariNode', 'true', 'true', '127.0.0.1'))), ImageId=FindInMap("CENTOS7", Ref("AWS::Region"), "AMI"), BlockDeviceMappings=If( "AmbariUseEBSBool", my_block_device_mappings_ebs(ref_disk_ambari_ebs_diskcount, "/dev/sd", ref_disk_ambari_ebs_volumesize, "gp2"), my_block_device_mappings_ephemeral(24, "/dev/sd")), CreationPolicy=CreationPolicy( ResourceSignal=ResourceSignal(Count=1, Timeout="PT45M")), KeyName=Ref(KeyName), InstanceType=Ref(AmbariInstanceType), NetworkInterfaces=[ ec2.NetworkInterfaceProperty( DeleteOnTermination="true", DeviceIndex="0", SubnetId=Ref(PublicSubnet), GroupSet=[Ref(AmbariSecurityGroup)], AssociatePublicIpAddress="true", ),
def main(args):
    """Generate the CloudFormation template for the EFS substack.

    Defines an EFS file system (created only when no existing file system
    id is supplied), its head-node and compute mount targets, and writes
    the rendered template as JSON to ``args.target_path``.

    :param args: parsed CLI arguments; only ``args.target_path`` is read.
    """
    t = Template()

    # ================= Parameters =================
    # [0 shared_dir, 1 efs_fs_id, 2 performance_mode, 3 efs_kms_key_id,
    # 4 provisioned_throughput, 5 encrypted, 6 throughput_mode,
    # 7 exists_valid_head_node_mt, 8 exists_valid_compute_mt]
    efs_options = t.add_parameter(
        Parameter(
            "EFSOptions",
            Type="CommaDelimitedList",
            Description="Comma separated list of efs related options, 9 parameters in total",
        )
    )
    compute_security_group = t.add_parameter(
        Parameter("ComputeSecurityGroup", Type="String", Description="Security Group for Mount Target")
    )
    head_node_subnet_id = t.add_parameter(
        Parameter("MasterSubnetId", Type="String", Description="Head node subnet id for head node mount target")
    )
    compute_subnet_id = t.add_parameter(
        Parameter(
            "ComputeSubnetId",
            Type="String",
            # FIX: corrected "Will be use" typo in the user-facing description.
            Description="User provided compute subnet id. Will be used to create compute mount target if needed.",
        )
    )

    # ================= Conditions =================
    # Create a new EFS only when a shared dir is requested and no existing
    # file system id was supplied.
    create_efs = t.add_condition(
        "CreateEFS",
        And(Not(Equals(Select(str(0), Ref(efs_options)), "NONE")), Equals(Select(str(1), Ref(efs_options)), "NONE")),
    )
    create_head_node_mt = t.add_condition(
        "CreateMasterMT",
        And(Not(Equals(Select(str(0), Ref(efs_options)), "NONE")), Equals(Select(str(7), Ref(efs_options)), "NONE")),
    )
    no_mt_in_compute_az = t.add_condition("NoMTInComputeAZ", Equals(Select(str(8), Ref(efs_options)), "NONE"))
    use_user_provided_compute_subnet = t.add_condition(
        "UseUserProvidedComputeSubnet", Not(Equals(Ref(compute_subnet_id), "NONE"))
    )
    # Need to create compute mount target if:
    # user is providing a compute subnet and
    # there is no existing MT in compute subnet's AZ(includes case where head node AZ == compute AZ).
    #
    # If user is not providing a compute subnet, either we are using the head node subnet as compute subnet,
    # or we will be creating a compute subnet that is in the same AZ as head node subnet,
    # see ComputeSubnet resource in the main stack.
    # In both cases no compute MT is needed.
    create_compute_mt = t.add_condition(
        "CreateComputeMT", And(Condition(use_user_provided_compute_subnet), Condition(no_mt_in_compute_az))
    )

    use_performance_mode = t.add_condition("UsePerformanceMode", Not(Equals(Select(str(2), Ref(efs_options)), "NONE")))
    use_efs_encryption = t.add_condition("UseEFSEncryption", Equals(Select(str(5), Ref(efs_options)), "true"))
    # KMS key only applies when encryption is enabled.
    use_efs_kms_key = t.add_condition(
        "UseEFSKMSKey", And(Condition(use_efs_encryption), Not(Equals(Select(str(3), Ref(efs_options)), "NONE")))
    )
    use_throughput_mode = t.add_condition("UseThroughputMode", Not(Equals(Select(str(6), Ref(efs_options)), "NONE")))
    use_provisioned = t.add_condition("UseProvisioned", Equals(Select(str(6), Ref(efs_options)), "provisioned"))
    # Provisioned throughput only applies in "provisioned" throughput mode.
    use_provisioned_throughput = t.add_condition(
        "UseProvisionedThroughput",
        And(Condition(use_provisioned), Not(Equals(Select(str(4), Ref(efs_options)), "NONE"))),
    )

    # ================= Resources =================
    fs = t.add_resource(
        FileSystem(
            "EFSFS",
            PerformanceMode=If(use_performance_mode, Select(str(2), Ref(efs_options)), NoValue),
            ProvisionedThroughputInMibps=If(use_provisioned_throughput, Select(str(4), Ref(efs_options)), NoValue),
            ThroughputMode=If(use_throughput_mode, Select(str(6), Ref(efs_options)), NoValue),
            Encrypted=If(use_efs_encryption, Select(str(5), Ref(efs_options)), NoValue),
            KmsKeyId=If(use_efs_kms_key, Select(str(3), Ref(efs_options)), NoValue),
            Condition=create_efs,
        )
    )

    t.add_resource(
        MountTarget(
            "MasterSubnetEFSMT",
            FileSystemId=If(create_efs, Ref(fs), Select(str(1), Ref(efs_options))),
            SecurityGroups=[Ref(compute_security_group)],
            SubnetId=Ref(head_node_subnet_id),
            Condition=create_head_node_mt,
        )
    )

    t.add_resource(
        MountTarget(
            "ComputeSubnetEFSMT",
            FileSystemId=If(create_efs, Ref(fs), Select(str(1), Ref(efs_options))),
            SecurityGroups=[Ref(compute_security_group)],
            SubnetId=Ref(compute_subnet_id),
            Condition=create_compute_mt,
        )
    )

    # ================= Outputs =================
    t.add_output(
        Output(
            "FileSystemId",
            Description="ID of the FileSystem",
            # FIX: Select("1", ...) unified to Select(str(1), ...) for
            # consistency with every other Select in this function
            # (identical runtime value).
            Value=If(create_efs, Ref(fs), Select(str(1), Ref(efs_options))),
        )
    )

    # Specify output file path
    json_file_path = args.target_path
    # FIX: context manager guarantees the handle is closed even when
    # serialization raises; the original leaked the file on error.
    with open(json_file_path, "w") as output_file:
        output_file.write(t.to_json())
def create_template(self):
    """Create template (main function called by Stacker)."""
    template = self.template
    variables = self.get_variables()

    template.set_version("2010-09-09")
    template.set_description("Terraform State Resources")

    # Conditions: a name is considered "omitted" when its variable is
    # blank or the literal string "undefined".
    for param in ("BucketName", "TableName"):
        template.add_condition(
            f"{param}Omitted",
            Or(Equals(variables[param].ref, ""),
               Equals(variables[param].ref, "undefined")),
        )

    # DynamoDB table used for Terraform state locking.
    terraformlocktable = dynamodb.Table(
        "TerraformStateTable",
        AttributeDefinitions=[
            dynamodb.AttributeDefinition(AttributeName="LockID",
                                         AttributeType="S")
        ],
        KeySchema=[
            dynamodb.KeySchema(AttributeName="LockID", KeyType="HASH")
        ],
        ProvisionedThroughput=dynamodb.ProvisionedThroughput(
            ReadCapacityUnits=2,
            WriteCapacityUnits=2),
        TableName=If("TableNameOmitted", NoValue,
                     variables["TableName"].ref),
    )
    template.add_resource(terraformlocktable)
    template.add_output(
        Output(
            f"{terraformlocktable.title}Name",
            Description="Name of DynamoDB table for Terraform state",
            Value=terraformlocktable.ref(),
        ))

    # S3 bucket holding the Terraform state files; retained on stack
    # deletion so state is never destroyed accidentally.
    terraformstatebucket = s3.Bucket(
        "TerraformStateBucket",
        DeletionPolicy="Retain",
        AccessControl=s3.Private,
        BucketName=If("BucketNameOmitted", NoValue,
                      variables["BucketName"].ref),
        LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
            s3.LifecycleRule(NoncurrentVersionExpirationInDays=90,
                             Status="Enabled")
        ]),
        VersioningConfiguration=s3.VersioningConfiguration(
            Status="Enabled"),
    )
    template.add_resource(terraformstatebucket)
    template.add_output(
        Output(
            f"{terraformstatebucket.title}Name",
            Description="Name of bucket storing Terraform state",
            Value=terraformstatebucket.ref(),
        ))
    template.add_output(
        Output(
            f"{terraformstatebucket.title}Arn",
            Description="Arn of bucket storing Terraform state",
            Value=terraformstatebucket.get_att("Arn"),
        ))

    # Permissions mirror the backend requirements documented at
    # https://www.terraform.io/docs/backends/types/s3.html#s3-bucket-permissions
    bucket_arn = terraformstatebucket.get_att("Arn")
    state_statements = [
        Statement(
            Action=[awacs.s3.ListBucket],
            Effect=Allow,
            Resource=[bucket_arn],
        ),
        Statement(
            Action=[awacs.s3.GetObject, awacs.s3.PutObject],
            Effect=Allow,
            Resource=[Join("", [bucket_arn, "/*"])],
        ),
        Statement(
            Action=[
                awacs.dynamodb.GetItem,
                awacs.dynamodb.PutItem,
                awacs.dynamodb.DeleteItem,
            ],
            Effect=Allow,
            Resource=[terraformlocktable.get_att("Arn")],
        ),
    ]
    managementpolicy = template.add_resource(
        iam.ManagedPolicy(
            "ManagementPolicy",
            Description="Managed policy for Terraform state management.",
            Path="/",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=state_statements,
            ),
        ))
    template.add_output(
        Output(
            "PolicyArn",
            Description="Managed policy Arn",
            Value=managementpolicy.ref(),
        ))
def main():
    """Build and print the DCV remote-desktop CloudFormation template.

    Assembles parameters, an IAM role with bucket policies, security
    groups, the DCV remote desktop EC2 instance (with install-script user
    data), an optional Elastic IP, and a wait condition, then prints the
    rendered template as YAML to stdout.
    """
    t = Template()
    AddAMIMap(t)

    t.set_version("2010-09-09")
    t.set_description(
        "DCV 2017 Remote Desktop with Xilinx Vivado (using AWS FPGA Developer AMI)"
    )

    # User data: shell preamble that exports the wait handle, user name and
    # password consumed by the bundled install script appended below.
    # (FIX: dropped the redundant `InstUserData = list()` that was
    # immediately overwritten, and the unused `tags` local.)
    InstUserData = [
        '#!/usr/bin/env bash\n',
        '\n',
        'set -x\n',
        '\n',
        '##exit 0\n',  # use this to disable all user-data and bring up files
        '\n',
        'my_wait_handle="', Ref('InstanceWaitHandle'), '"\n',
        'user_name="', Ref('UserName'), '"\n',
        # NOTE(review): the password appears hard-coded/redacted here rather
        # than Ref('UserPass') -- confirm against the install script's needs.
        'user_pass="******"\n',
        '\n',
    ]

    with open('_include/dcv-install.sh', 'r') as ud_file:
        # extend() replaces the original manual append loop; same ordering.
        InstUserData.extend(ud_file.readlines())

    # ---------------- Parameters ----------------
    VPCId = t.add_parameter(Parameter(
        'VPCId',
        Type="AWS::EC2::VPC::Id",
        Description="VPC ID for where the remote desktop instance should be launched"
    ))
    t.set_parameter_label(VPCId, "VPC ID")
    t.add_parameter_to_group(VPCId, "Instance Configuration")

    Subnet = t.add_parameter(Parameter(
        'Subnet',
        Type="AWS::EC2::Subnet::Id",
        Description="For the Subnet ID, you should choose one in the "
                    "Availability Zone where you want the instance launched"
    ))
    t.set_parameter_label(Subnet, "Subnet ID")
    t.add_parameter_to_group(Subnet, "Instance Configuration")

    ExistingSecurityGroup = t.add_parameter(Parameter(
        'ExistingSecurityGroup',
        Type="String",
        Default="NO_VALUE",
        # FIX: added the missing space before "leave as NO_VALUE".
        Description="OPTIONAL: Needs to be a SG ID, for example sg-abcd1234efgh. "
                    "This is an already existing Security Group ID that is "
                    "in the same VPC, this is an addition to the security groups that "
                    "are automatically created to enable access to the remote desktop, "
                    "leave as NO_VALUE if you choose not to use this"
    ))
    t.set_parameter_label(ExistingSecurityGroup,
                          "OPTIONAL: Existing Security Group (e.g. sg-abcd1234efgh)")
    t.add_parameter_to_group(ExistingSecurityGroup, "Instance Configuration")

    remoteDesktopInstanceType = t.add_parameter(Parameter(
        'remoteDesktopInstanceType',
        Type="String",
        Description="This is the instance type that will be used. As this is a "
                    "2D workstation, we are not supporting GPU instance types.",
        Default="m4.xlarge",
        AllowedValues=[
            "m4.large", "m4.xlarge", "m4.2xlarge", "m4.4xlarge", "m4.10xlarge",
            "m5.large", "m5.xlarge", "m5.2xlarge", "m5.4xlarge", "m5.12xlarge",
            "m5.24xlarge",
            "z1d.large", "z1d.xlarge", "z1d.2xlarge", "z1d.3xlarge",
            "z1d.6xlarge", "z1d.12xlarge", "z1d.metal"
        ],
        # FIX: grammar in the user-facing constraint ("Must an" -> "Must be an").
        ConstraintDescription="Must be an EC2 instance type from the list"
    ))
    t.set_parameter_label(remoteDesktopInstanceType, "Remote Desktop Instance Type")
    t.add_parameter_to_group(remoteDesktopInstanceType, "Instance Configuration")

    EC2KeyName = t.add_parameter(Parameter(
        'EC2KeyName',
        Type="AWS::EC2::KeyPair::KeyName",
        Description="Name of an existing EC2 KeyPair to enable SSH access to the instance.",
        ConstraintDescription="REQUIRED: Must be a valid EC2 key pair"
    ))
    t.set_parameter_label(EC2KeyName, "EC2 Key Name")
    t.add_parameter_to_group(EC2KeyName, "Instance Configuration")

    OperatingSystem = t.add_parameter(Parameter(
        'OperatingSystem',
        Type="String",
        Description="Operating System of the AMI",
        Default="centos7",
        AllowedValues=[
            "centos7"
        ],
        ConstraintDescription="Must be: centos7"
    ))
    t.set_parameter_label(OperatingSystem, "Operating System of AMI")
    t.add_parameter_to_group(OperatingSystem, "Instance Configuration")

    StaticPrivateIpAddress = t.add_parameter(Parameter(
        'StaticPrivateIpAddress',
        Type="String",
        Default="NO_VALUE",
        Description="OPTIONAL: If you already have a private VPC address range, you can "
                    "specify the private IP address to use, leave as NO_VALUE if you choose not to use this",
    ))
    t.set_parameter_label(StaticPrivateIpAddress, "OPTIONAL: Static Private IP Address")
    t.add_parameter_to_group(StaticPrivateIpAddress, "Instance Configuration")

    UsePublicIp = t.add_parameter(Parameter(
        'UsePublicIp',
        Type="String",
        Description="Should a public IP address be given to the instance, "
                    "this is overridden by CreateElasticIP=True",
        Default="True",
        ConstraintDescription="True/False",
        AllowedValues=[
            "True",
            "False"
        ]
    ))
    t.set_parameter_label(UsePublicIp, "Assign a public IP Address")
    t.add_parameter_to_group(UsePublicIp, "Instance Configuration")

    CreateElasticIP = t.add_parameter(Parameter(
        'CreateElasticIP',
        Type="String",
        Description="Should an Elastic IP address be created and assigned, "
                    "this allows for persistent IP address assignment",
        Default="True",
        ConstraintDescription="True/False",
        AllowedValues=[
            "True",
            "False"
        ]
    ))
    t.set_parameter_label(CreateElasticIP, "Create an Elastic IP address")
    t.add_parameter_to_group(CreateElasticIP, "Instance Configuration")

    S3BucketName = t.add_parameter(Parameter(
        'S3BucketName',
        Type="String",
        Default="NO_VALUE",
        # FIX: added the missing space before "leave as NO_VALUE".
        Description="OPTIONAL: S3 bucket to allow this instance read access (List and Get), "
                    "leave as NO_VALUE if you choose not to use this"
    ))
    t.set_parameter_label(S3BucketName, "OPTIONAL: S3 bucket for read access")
    t.add_parameter_to_group(S3BucketName, "Instance Configuration")

    AccessCidr = t.add_parameter(Parameter(
        'AccessCidr',
        Type="String",
        Description="This is the CIDR block for allowing remote access, for ports 22 and 8443",
        Default="111.222.333.444/32",
        AllowedPattern="(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})",
        ConstraintDescription="Must be a valid CIDR x.x.x.x/x"
    ))
    t.set_parameter_label(AccessCidr, "CIDR block for remote access (ports 22 and 8443)")
    t.add_parameter_to_group(AccessCidr, "Instance Configuration")

    UserName = t.add_parameter(Parameter(
        'UserName',
        Type="String",
        Description="User name for DCV remote desktop login, default is \"simuser\".",
        Default="simuser",
        MinLength="4",
    ))
    t.set_parameter_label(UserName, "User name for DCV login")
    t.add_parameter_to_group(UserName, "DCV Configuration")

    UserPass = t.add_parameter(Parameter(
        'UserPass',
        Type="String",
        Description="Password for DCV remote desktop login. The default password is Ch4ng3M3!",
        Default="Ch4ng3M3!",
        MinLength="8",
        AllowedPattern="^((?=.*[a-z])(?=.*[A-Z])(?=.*[\\d])|(?=.*[a-z])(?=.*[A-Z])(?=.*[\\W_])|(?=.*[a-z])(?=.*[\\d])(?=.*[\\W_])|(?=.*[A-Z])(?=.*[\\d])(?=.*[\\W_])).+$",
        ConstraintDescription="Password must contain at least one element from three of the following sets: lowercase letters, uppercase letters, base 10 digits, non-alphanumeric characters",
        NoEcho=True
    ))
    t.set_parameter_label(UserPass, "Password for DCV login")
    t.add_parameter_to_group(UserPass, "DCV Configuration")

    # end parameters

    # ---------------- IAM ----------------
    RootRole = t.add_resource(iam.Role(
        "RootRole",
        AssumeRolePolicyDocument={
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Principal": {
                        "Service": ["ec2.amazonaws.com"],
                    },
                    "Action": ["sts:AssumeRole"]
                }]
        }
    ))

    # FIX: the original two statements ended with a stray trailing comma
    # (`x = t.add_resource(...)),`), silently binding 1-tuples instead of
    # the resources themselves.
    dcvBucketPolicy = t.add_resource(PolicyType(
        "dcvBucketPolicy",
        PolicyName="dcvBucketPolicy",
        Roles=[Ref(RootRole)],
        PolicyDocument={
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": ["s3:GetObject"],
                    "Resource": "arn:aws:s3:::dcv-license.us-east-1/*"
                }
            ],
        },
    ))

    BucketPolicy = t.add_resource(PolicyType(
        "BucketPolicy",
        PolicyName="BucketPolicy",
        Roles=[Ref(RootRole)],
        PolicyDocument={
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": ["s3:GetObject"],
                    "Resource": {"Fn::Join": ["", ["arn:aws:s3:::", {"Ref": "S3BucketName"}, "/*"]]}
                },
                {
                    "Effect": "Allow",
                    "Action": ["s3:ListBucket"],
                    "Resource": {"Fn::Join": ["", ["arn:aws:s3:::", {"Ref": "S3BucketName"}]]}
                }
            ],
        },
        Condition="Has_Bucket"
    ))

    # ---------------- Security groups ----------------
    remoteDesktopSecurityGroup = t.add_resource(SecurityGroup(
        "remoteDesktopSecurityGroup",
        VpcId=Ref(VPCId),
        # FIX: "Secuirty" typo in the deployed description.
        GroupDescription="Remote Desktop Security group",
        SecurityGroupIngress=[
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="8443",
                ToPort="8443",
                CidrIp=Ref(AccessCidr),
            ),
        ]
    ))

    SshSecurityGroup = t.add_resource(SecurityGroup(
        "SshSecurityGroup",
        VpcId=Ref(VPCId),
        # FIX: "Secuirty" typo in the deployed description.
        GroupDescription="SSH Security group",
        SecurityGroupIngress=[
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="22",
                ToPort="22",
                CidrIp=Ref(AccessCidr),
            ),
        ]
    ))

    RootInstanceProfile = t.add_resource(InstanceProfile(
        "RootInstanceProfile",
        Roles=[Ref(RootRole)]
    ))

    # ---------------- Instance ----------------
    remoteDesktopInstance = t.add_resource(ec2.Instance(
        'remoteDesktopInstance',
        ImageId=FindInMap("AWSRegionAMI", Ref("AWS::Region"), Ref(OperatingSystem)),
        KeyName=Ref(EC2KeyName),
        InstanceType=(Ref(remoteDesktopInstanceType)),
        DisableApiTermination='false',
        NetworkInterfaces=[
            NetworkInterfaceProperty(
                SubnetId=Ref(Subnet),
                # Include the user-supplied SG only when one was provided.
                GroupSet=If(
                    "not_existing_sg",
                    [Ref(remoteDesktopSecurityGroup), Ref(SshSecurityGroup)],
                    [Ref(remoteDesktopSecurityGroup), Ref(SshSecurityGroup),
                     Ref(ExistingSecurityGroup)]
                ),
                AssociatePublicIpAddress=Ref(UsePublicIp),
                DeviceIndex='0',
                DeleteOnTermination='true',
                PrivateIpAddress=If(
                    "Has_Static_Private_IP",
                    Ref(StaticPrivateIpAddress),
                    Ref("AWS::NoValue"),
                )
            )
        ],
        IamInstanceProfile=(Ref(RootInstanceProfile)),
        UserData=Base64(Join('', InstUserData)),
    ))

    EIPAddress = t.add_resource(EIP(
        'EIPAddress',
        Domain='vpc',
        InstanceId=Ref(remoteDesktopInstance),
        Condition="create_elastic_ip"
    ))

    # ---------------- Conditions ----------------
    t.add_condition(
        "not_existing_sg",
        Equals(Ref(ExistingSecurityGroup), "NO_VALUE")
    )
    t.add_condition(
        "Has_Public_Ip",
        Equals(Ref(UsePublicIp), "True")
    )
    t.add_condition(
        "Has_Bucket",
        Not(Equals(Ref(S3BucketName), "NO_VALUE"))
    )
    t.add_condition(
        "create_elastic_ip",
        Equals(Ref(CreateElasticIP), "True")
    )
    t.add_condition(
        "Has_Static_Private_IP",
        Not(Equals(Ref(StaticPrivateIpAddress), "NO_VALUE"))
    )

    # Wait condition signalled by the install script via the handle
    # embedded in the user data.
    waithandle = t.add_resource(WaitConditionHandle('InstanceWaitHandle'))

    instanceWaitCondition = t.add_resource(WaitCondition(
        "instanceWaitCondition",
        Handle=Ref(waithandle),
        Timeout="3600",
        DependsOn="remoteDesktopInstance"
    ))

    # ---------------- Outputs ----------------
    t.add_output([
        Output(
            "DCVConnectionLink",
            Description="Connect to the DCV Remote Desktop with this URL",
            Value=Join("", [
                "https://",
                GetAtt("remoteDesktopInstance", 'PublicIp'),
                ":8443"
            ])
        ),
        Output(
            "DCVUserName",
            Description="Login name for DCV session",
            Value=(Ref(UserName))
        ),
        Output(
            "SSHTunnelCommand",
            Description='Command for setting up SSH tunnel to remote desktop, use "localhost:18443" for DCV client',
            Value=Join("", [
                "ssh -i <file.pem> -L 18443:localhost:8443 -l centos ",
                GetAtt("remoteDesktopInstance", 'PublicIp')
            ])
        ),
    ])

    # print(t.to_json(indent=2))
    print(to_yaml(t.to_json(indent=2), clean_up=True))
}) ]) ], JobFlowRole=Ref(emr_instance_profile), ServiceRole=Ref(emr_service_role), Instances=emr.JobFlowInstancesConfig( Ec2KeyName=Ref(keyname), Ec2SubnetId=Ref(subnet), MasterInstanceGroup=emr.InstanceGroupConfigProperty( Name="Master Instance", InstanceCount="1", InstanceType=M4_LARGE, Market="ON_DEMAND"), CoreInstanceGroup=emr.InstanceGroupConfigProperty( Name="Core Instance", BidPrice=If(withSpotPrice, Ref(spot), Ref("AWS::NoValue")), Market=If(withSpotPrice, "SPOT", "ON_DEMAND"), EbsConfiguration=emr.EbsConfiguration(EbsBlockDeviceConfigs=[ emr.EbsBlockDeviceConfigs( VolumeSpecification=emr.VolumeSpecification( SizeInGB="10", VolumeType="gp2"), VolumesPerInstance="1") ], EbsOptimized="true"), InstanceCount="1", InstanceType=M4_LARGE, )), Applications=[ emr.Application(Name="Hadoop"), emr.Application(Name="Hive"), emr.Application(Name="Mahout"),
def create_template(self):
    """Create template (main function called by Stacker).

    Defines the security groups, launch configuration and auto-scaling
    group for EKS worker nodes, mirroring the upstream
    amazon-eks-nodegroup template.
    """
    template = self.template
    variables = self.get_variables()
    # FIX: add_version/add_description/add_metadata are deprecated in
    # troposphere in favour of the set_* API already used by the other
    # blueprints in this codebase; rendered output is unchanged.
    template.set_version('2010-09-09')
    template.set_description('Kubernetes workers via EKS - V1.0.0 '
                             '- compatible with amazon-eks-node-v23+')

    # Metadata: console parameter grouping only; no runtime effect.
    template.set_metadata({
        'AWS::CloudFormation::Interface': {
            'ParameterGroups': [{
                'Label': {
                    'default': 'EKS Cluster'
                },
                'Parameters': [
                    variables[i].name
                    for i in ['ClusterName',
                              'ClusterControlPlaneSecurityGroup']
                ]
            }, {
                'Label': {
                    'default': 'Worker Node Configuration'
                },
                'Parameters': [
                    variables[i].name
                    for i in [
                        'NodeGroupName', 'NodeAutoScalingGroupMinSize',
                        'NodeAutoScalingGroupMaxSize',
                        'UseDesiredInstanceCount', 'NodeInstanceType',
                        'NodeInstanceProfile', 'NodeImageId',
                        'NodeVolumeSize', 'KeyName', 'UseSpotInstances',
                        'SpotBidPrice', 'BootstrapArguments'
                    ]
                ]
            }, {
                'Label': {
                    'default': 'Worker Network Configuration'
                },
                'Parameters': [variables[i].name
                               for i in ['VpcId', 'Subnets']]
            }]
        }
    })

    # Conditions
    template.add_condition(
        'SetSpotPrice',
        Equals(variables['UseSpotInstances'].ref, 'yes'))
    template.add_condition(
        'DesiredInstanceCountSpecified',
        Equals(variables['UseDesiredInstanceCount'].ref, 'true'))
    template.add_condition('KeyNameSpecified',
                           Not(Equals(variables['KeyName'].ref, '')))

    # Resources
    # Worker-node SG; the kubernetes.io/cluster tag is required for the
    # cloud provider integration to discover it.
    nodesecuritygroup = template.add_resource(
        ec2.SecurityGroup(
            'NodeSecurityGroup',
            GroupDescription='Security group for all nodes in the cluster',
            Tags=[
                {
                    'Key': Sub('kubernetes.io/cluster/${ClusterName}'),
                    'Value': 'owned'
                },
            ],
            VpcId=variables['VpcId'].ref))
    template.add_output(
        Output('NodeSecurityGroup',
               Description='Security group for all nodes in the cluster',
               Value=nodesecuritygroup.ref()))
    # Node-to-node traffic on all ports.
    template.add_resource(
        ec2.SecurityGroupIngress(
            'NodeSecurityGroupIngress',
            Description='Allow node to communicate with each other',
            GroupId=nodesecuritygroup.ref(),
            SourceSecurityGroupId=nodesecuritygroup.ref(),
            IpProtocol='-1',
            FromPort=0,
            ToPort=65535))
    template.add_resource(
        ec2.SecurityGroupIngress(
            'NodeSecurityGroupFromControlPlaneIngress',
            Description='Allow worker Kubelets and pods to receive '
                        'communication from the cluster control plane',
            GroupId=nodesecuritygroup.ref(),
            SourceSecurityGroupId=variables[
                'ClusterControlPlaneSecurityGroup'].ref,  # noqa
            IpProtocol='tcp',
            FromPort=1025,
            ToPort=65535))
    template.add_resource(
        ec2.SecurityGroupEgress(
            'ControlPlaneEgressToNodeSecurityGroup',
            Description='Allow the cluster control plane to communicate '
                        'with worker Kubelet and pods',
            GroupId=variables['ClusterControlPlaneSecurityGroup'].ref,
            DestinationSecurityGroupId=nodesecuritygroup.ref(),
            IpProtocol='tcp',
            FromPort=1025,
            ToPort=65535))
    # Port 443 in both directions supports extension API servers.
    template.add_resource(
        ec2.SecurityGroupIngress(
            'NodeSecurityGroupFromControlPlaneOn443Ingress',
            Description='Allow pods running extension API servers on port '
                        '443 to receive communication from cluster '
                        'control plane',
            GroupId=nodesecuritygroup.ref(),
            SourceSecurityGroupId=variables[
                'ClusterControlPlaneSecurityGroup'].ref,  # noqa
            IpProtocol='tcp',
            FromPort=443,
            ToPort=443))
    template.add_resource(
        ec2.SecurityGroupEgress(
            'ControlPlaneEgressToNodeSecurityGroupOn443',
            Description='Allow the cluster control plane to communicate '
                        'with pods running extension API servers on port '
                        '443',
            GroupId=variables['ClusterControlPlaneSecurityGroup'].ref,
            DestinationSecurityGroupId=nodesecuritygroup.ref(),
            IpProtocol='tcp',
            FromPort=443,
            ToPort=443))
    template.add_resource(
        ec2.SecurityGroupIngress(
            'ClusterControlPlaneSecurityGroupIngress',
            Description='Allow pods to communicate with the cluster API '
                        'Server',
            GroupId=variables['ClusterControlPlaneSecurityGroup'].ref,
            SourceSecurityGroupId=nodesecuritygroup.ref(),
            IpProtocol='tcp',
            FromPort=443,
            ToPort=443))

    nodelaunchconfig = template.add_resource(
        autoscaling.LaunchConfiguration(
            'NodeLaunchConfig',
            AssociatePublicIpAddress=True,
            IamInstanceProfile=variables['NodeInstanceProfile'].ref,
            ImageId=variables['NodeImageId'].ref,
            InstanceType=variables['NodeInstanceType'].ref,
            KeyName=If('KeyNameSpecified',
                       variables['KeyName'].ref,
                       NoValue),
            SecurityGroups=[nodesecuritygroup.ref()],
            SpotPrice=If('SetSpotPrice',
                         variables['SpotBidPrice'].ref,
                         NoValue),
            BlockDeviceMappings=[
                autoscaling.BlockDeviceMapping(
                    DeviceName='/dev/xvda',
                    Ebs=autoscaling.EBSBlockDevice(
                        VolumeSize=variables['NodeVolumeSize'].ref,
                        VolumeType='gp2',
                        DeleteOnTermination=True))
            ],
            # Bootstrap joins the node to the cluster, then signals the
            # ASG's creation policy via cfn-signal.
            UserData=Base64(
                Sub('\n'.join([
                    '#!/bin/bash',
                    'set -o xtrace',
                    '/etc/eks/bootstrap.sh ${ClusterName} ${BootstrapArguments}',  # noqa
                    '/opt/aws/bin/cfn-signal --exit-code $? \\',
                    '--stack ${AWS::StackName} \\',
                    '--resource NodeGroup \\',
                    '--region ${AWS::Region}'
                ])))))

    template.add_resource(
        autoscaling.AutoScalingGroup(
            'NodeGroup',
            # NOTE(review): the desired capacity reuses the *max* size ref
            # when a desired count is requested -- confirm this is intended.
            DesiredCapacity=If(
                'DesiredInstanceCountSpecified',
                variables['NodeAutoScalingGroupMaxSize'].ref,
                NoValue),
            LaunchConfigurationName=nodelaunchconfig.ref(),
            MinSize=variables['NodeAutoScalingGroupMinSize'].ref,
            MaxSize=variables['NodeAutoScalingGroupMaxSize'].ref,
            VPCZoneIdentifier=variables['Subnets'].ref,
            Tags=[
                autoscaling.Tag(
                    'Name',
                    Sub('${ClusterName}-${NodeGroupName}-Node'),
                    True),
                autoscaling.Tag(
                    Sub('kubernetes.io/cluster/${ClusterName}'),
                    'owned',
                    True)
            ],
            UpdatePolicy=UpdatePolicy(
                AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                    MinInstancesInService='1',
                    MaxBatchSize='1'))))