def create_cft():
    """Assemble and return the Redshift-in-a-VPC CloudFormation template.

    Pulls parameters, conditions, lookup resources, the security group,
    Redshift resources and outputs from module-level collections.
    """
    cft = Template()
    cft.add_version("2010-09-09")
    cft.add_description(
        "AWS CloudFormation Template: Redshift cluster in a VPC")

    # Module-level parameter definitions
    for param in parameters:
        cft.add_parameter(param)

    # Module-level condition mapping (name -> condition object)
    for name in conditions:
        cft.add_condition(name, conditions[name])

    # Custom lookups and the security group, in the original order,
    # followed by the Redshift pieces
    for res in (subnet_info, sg_info, securitygroup):
        cft.add_resource(res)
    for res in redshift_resources:
        cft.add_resource(res)

    for out in outputs:
        cft.add_output(out)

    return cft
Exemple #2
0
def main(argv):
    """Build the RDS database template, print it, and write it to a file."""
    template = Template()
    template.add_description("RDS Database")

    # Register every declared input parameter
    for param in parameters.values():
        template.add_parameter(param)

    # Postgis provisioning only runs when a provision stack name is given
    template.add_condition(
        "InstallPostgis",
        Not(Equals(Ref("ProvisionStackName"), "")))
    template.add_resource(gen_postgis_provisioner())

    # Security group plus the RDS database resources themselves
    template.add_resource(gen_sg())
    for res in gen_rds_db(service_name):
        template.add_resource(res)

    template.add_output(
        Output("ConnectionString",
               Description="Connection string for database",
               Value=Join("", [GetAtt("DB", "Endpoint.Address"),
                               Ref("DB")])))

    # Render to JSON, echo for debugging, then persist
    rendered = template.to_json()
    print(rendered)
    write_to_file(rendered)
def template():
    """Return a Template populated from the module-level collections."""
    tpl = Template()
    for param in parameters.values():
        tpl.add_parameter(param)
    for name in conditions:
        tpl.add_condition(name, conditions[name])
    for res in resources.values():
        tpl.add_resource(res)
    return tpl
Exemple #4
0
def template():
    """Assemble a Template from module-level parameters/conditions/resources."""
    result = Template()
    for param in parameters.values():
        result.add_parameter(param)
    for cond_name in conditions:
        result.add_condition(cond_name, conditions[cond_name])
    for resource in resources.values():
        result.add_resource(resource)
    return result
Exemple #5
0
def template(**kwargs):
    """Build a template with a Lambda function, a version, and a 'prod' alias.

    Args:
        **kwargs: forwarded verbatim to ``lambda_function``.

    Returns:
      template Template()
    """
    template = Template()
    release = template.add_parameter(
        Parameter('ReleaseNewAlias',
                  Type="String",
                  AllowedValues=['Yes', 'No'],
                  Default='No'))
    # NOTE(review): the condition body is wrapped in a dict keyed by its own
    # name; usually add_condition takes the bare Equals(...) — confirm the
    # rendered Conditions section is what is intended.
    template.add_condition(
        'ReleaseAlias', {'ReleaseAlias': Equals(Ref(release), 'Yes')})
    function = template.add_resource(lambda_function(**kwargs))
    version = template.add_resource(
        Version('LambdaVersion', FunctionName=GetAtt(function, 'Arn')))
    # BUG FIX: 'ReleaseAlias' is a Condition, not a resource, so it cannot
    # appear in DependsOn (CloudFormation rejects dependencies on
    # non-resources). Attach it via the Condition attribute instead, so the
    # alias is only created when ReleaseNewAlias == 'Yes'.
    alias = template.add_resource(
        Alias('LambdaAlias',
              Name='prod',
              Condition='ReleaseAlias',
              Description=Sub(f'Alias to version ${{{version.title}.Arn}}'),
              FunctionName=Ref(function),
              FunctionVersion=Ref(version)))
    template.add_output(object_outputs(function, True))
    template.add_output(object_outputs(version, True))
    return template
Exemple #6
0
def generate_template(accountalarms, configrules, s3bucket, s3key):
    """Build the metric-filter/alarm and Config-rule CloudFormation template.

    Args:
        accountalarms: iterable of dicts; entries whose 'Type' is
            'CloudTrail' get a CloudWatch metric filter plus an alarm.
        configrules: iterable of AWS Config rule definitions.
        s3bucket, s3key: location of the Lambda deployment package.

    Returns:
        the assembled Template.
    """
    # Collections the helper functions append to. (The unused `maps`
    # accumulator from the original was dropped.)
    parameters = []
    resources = []
    outputs = []
    conditions = {}

    resources, outputs = add_sns_topic(resources, outputs)

    for item in accountalarms:
        if item['Type'].strip() == 'CloudTrail':
            resources = add_cloudwatch_metric(resources, item)
            resources = add_cloudwatch_alarm(resources, item)

    resources, outputs = add_lambda_function(resources, outputs, s3bucket, s3key)

    for item in configrules:
        resources = add_config_rule(resources, item)

    template = Template()
    template.add_version('2010-09-09')
    template.add_description(
        "This is an AWS CloudFormation template that provisions metric filters"
        " based on a spreadsheet of applicable metric filters."
        " ***WARNING*** "
        "This template creates many Amazon CloudWatch alarms based on a Amazon"
        " CloudWatch Logs Log Group. You will be billed for the AWS resources "
        "used if you create a stack from this template."
    )
    # Plain loops instead of list comprehensions used only for their side
    # effects (the comprehension results were discarded).
    for p in parameters:
        template.add_parameter(p)
    for k in conditions:
        template.add_condition(k, conditions[k])
    for r in resources:
        template.add_resource(r)
    for o in outputs:
        template.add_output(o)
    return template
    ConstraintDescription="Can contain only ASCII characters.",
    Type="AWS::EC2::KeyPair::KeyName",
    Description="Name of an existing EC2 KeyPair to enable SSH access to the instance",
))

# Parameter: whether the Ambari node should use EBS volumes ("yes"/"no").
AmbariUseEBS = t.add_parameter(Parameter(
    "AmbariUseEBS",
    Default="no",
    ConstraintDescription="Must be yes or no only.",
    Type="String",
    Description="Use EBS Volumes for the Ambari Node",
    AllowedValues=["yes", "no"],
))


# True when the AmbariUseEBS parameter is "yes".
AmbariUseEBSBool = t.add_condition("AmbariUseEBSBool", Equals(Ref(AmbariUseEBS),"yes"))

# CIDR blocks for the VPC and its public subnet.
t.add_mapping("SubnetConfig",
    {'Public': {'CIDR': '10.0.0.0/24'}, 'VPC': {'CIDR': '10.0.0.0/16'}}
)

t.add_mapping("CENTOS7", {
    "eu-west-1": {"AMI": "ami-33734044"},
    "ap-southeast-1": {"AMI": "ami-2a7b6b78"},
    "ap-southeast-2": {"AMI": "ami-d38dc6e9"},
    "eu-central-1": {"AMI": "ami-e68f82fb"},
    "ap-northeast-1": {"AMI": "ami-b80b6db8"},
    "us-east-1": {"AMI": "ami-61bbf104"},
    "sa-east-1": {"AMI": "ami-fd0197e0"},
    "us-west-1": {"AMI": "ami-f77fbeb3"},
    "us-west-2": {"AMI": "ami-d440a6e7"}
Exemple #8
0
# Fully-qualified domain name: <label>.<hosted zone>
domain_name = Join('.', [Ref(param_label), Ref(param_hosted_zone_name)])

# DNS-validated ACM certificate, pinned to us-east-1 (see inline comment).
acm_cert = template.add_resource(
    custom_resources.acm.DnsValidatedCertificate(
        "AcmCert",
        Region='us-east-1',  # Api gateway is in us-east-1
        DomainName=domain_name,
        Tags=GetAtt(cloudformation_tags, 'TagList'),
    ))
template.add_output(
    Output(
        "AcmCertDnsRecords",
        Value=GetAtt(acm_cert, "DnsRecords"),
    ))

# True when the UseCert parameter equals 'yes'.
use_cert_cond = template.add_condition('UseCert',
                                       Equals(Ref(param_use_cert), 'yes'))

# Create an entry in the domain-table, so this domain is listed in the Authorizer
template.add_resource(
    custom_resources.dynamodb.Item(
        "AuthorizedDomain",
        TableName=ImportValue(
            Join('-', [Ref(param_authorizer_param_stack), "DomainTable"])),
        ItemKey={"domain": {
            "S": domain_name
        }},
    ))

# Create an entry in the group-table
template.add_resource(
    custom_resources.dynamodb.Item(
        Type="AWS::EC2::KeyPair::KeyName",
        Description=
        "Name of an existing EC2 KeyPair to enable SSH access to the instance",
    ))

# Parameter: whether worker nodes should use EBS volumes ("yes"/"no").
UseEBS = t.add_parameter(
    Parameter(
        "UseEBS",
        Default="no",
        ConstraintDescription="Must be yes or no only.",
        Type="String",
        Description="Use EBS Volumes for the Worker Node",
        AllowedValues=["yes", "no"],
    ))

# True when the UseEBS parameter is "yes".
UseEBSBool = t.add_condition("UseEBSBool", Equals(Ref(UseEBS), "yes"))

# CIDR layout for the VPC and its public subnet.
t.add_mapping("SubnetConfig", {
    'Public': {
        'CIDR': '10.0.0.0/24'
    },
    'VPC': {
        'CIDR': '10.0.0.0/16'
    }
})

t.add_mapping(
    "RHEL66", {
        'ap-northeast-1': {
            'AMI': 'ami-a15666a0'
        },
Exemple #10
0
# NOTE(review): the description says 0 disables retirement, but
# MinValue=90 rejects 0 — confirm which behavior is intended.
param_retire_days = t.add_parameter(
    Parameter(
        'RetireInDays',
        Description=
        'Days before retire current object, set to 0 disables retirement',
        Type='Number',
        Default=3650,
        MinValue=90,
        MaxValue=3650,
    ))

#
# Condition
#
# A bucket name was explicitly supplied (parameter is non-empty).
t.add_condition('HasBucketName', Not(Equals(Ref(param_bucket_name), '')))

# Running in the China (Beijing) partition.
t.add_condition('ChinaRegionCondition', Equals(Ref(AWS_REGION), 'cn-north-1'))

#
# Resource
#

bucket = t.add_resource(
    s3.Bucket(
        'Bucket',
        BucketName=If('HasBucketName', Ref(param_bucket_name),
                      Ref(AWS_NO_VALUE)),
        LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
            # Add a rule to
            s3.LifecycleRule(
Exemple #11
0
# Placeholder member-account identifiers.
MEMBER_ACCOUNT_ID = "5678"
MEMBER_ACCOUNT_EMAIL = "*****@*****.**"

t = Template()

t.add_description(
    "GuardDuty example deployment for master and member accounts")

# Only needed on member accounts; left empty on the master account.
member_invitation = t.add_parameter(
    Parameter(
        "MemberInvitation",
        Type="String",
        Description=
        "Invitation ID for member account, leave empty on master account"))

# IsMaster compares the current account to MASTER_ACCOUNT_ID (defined
# earlier in this file); IsMember is its negation.
t.add_condition("IsMaster", Equals(Ref(AWS_ACCOUNT_ID), MASTER_ACCOUNT_ID))
t.add_condition("IsMember", Not(Condition("IsMaster")))

# A GuardDuty detector is created in every account.
detector = t.add_resource(guardduty.Detector("Detector", Enable=True))

# Member accounts link to the master via the invitation id.
master = t.add_resource(
    guardduty.Master(
        "Master",
        Condition="IsMember",
        DetectorId=Ref(detector),
        MasterId=MASTER_ACCOUNT_ID,
        InvitationId=Ref(member_invitation),
    ))

# You can create multiple members if you have multiple members accounts
member = t.add_resource(
        'us-west-2': {
            'AMI': 'ami-16fd7026'
        },
        'eu-west-1': {
            'AMI': 'ami-24506250'
        },
        'sa-east-1': {
            'AMI': 'ami-3e3be423'
        },
        'ap-southeast-1': {
            'AMI': 'ami-74dda626'
        },
        'ap-northeast-1': {
            'AMI': 'ami-dcfa4edd'
        }
    })

# Production flag driven by the Environment parameter ('true'/'false').
c_is_prod = t.add_condition('IsProduction', Equals(Ref('Environment'), 'true'))
#t.set_transform('AWS::Serverless-2016-10-31')

# The VPC is only created when IsProduction holds.
r_vpc = t.add_resource(
    ec2.VPC('VPC', Condition=c_is_prod, CidrBlock='10.0.0.0/24'))

# Export the VPC id as <stack-name>-VPC for cross-stack imports.
t.add_output(
    Output('VPCId',
           Value=Ref(r_vpc),
           Description='VPC Id',
           Export=Export(Sub('${AWS::StackName}-' + r_vpc.title))))

print(t.to_yaml())
    ),
    Parameter(
        'Three',
        Type='String',
    ),
    Parameter(
        'Four',
        Type='String',
    ),
    Parameter(
        'SshKeyName',
        Type='String',
    )
])

# Base conditions comparing the input parameters against literals/each other.
t.add_condition('OneEqualsFoo', Equals(Ref('One'), 'Foo'))

t.add_condition('NotOneEqualsFoo', Not(Condition('OneEqualsFoo')))

t.add_condition('BarEqualsTwo', Equals('Bar', Ref('Two')))

t.add_condition('ThreeEqualsFour', Equals(Ref('Three'), Ref('Four')))

# Composite conditions built from the base ones above.
t.add_condition('OneEqualsFooOrBarEqualsTwo',
                Or(Condition('OneEqualsFoo'), Condition('BarEqualsTwo')))

t.add_condition('OneEqualsFooAndNotBarEqualsTwo',
                And(Condition('OneEqualsFoo'), Not(Condition('BarEqualsTwo'))))

t.add_condition(
    'OneEqualsFooAndBarEqualsTwoAndThreeEqualsPft',
Exemple #14
0
class Stack(StackConfig):
    ''' main stack object '''
    def __init__(self, stack_object):
        # Configuration dict taken from the wrapped stack object.
        self.config = stack_object.config
        # Template that every adder method below writes resources into.
        self.template = Template()
        # Default security groups (monitoring + admin cross-stack imports).
        self.sec_groups = self.__sec_groups_constructor()
        # IAM capabilities required at stack creation; empty by default.
        self.capabilities = []

    @staticmethod
    def __tag_role_generator(role, extra_roles):
        """Build Tags with the primary role plus each extra role as a
        bare key (empty value)."""
        tags = Tags(**{role: ''})
        # An empty extra_roles simply leaves the loop body unexecuted.
        for extra in extra_roles:
            tags += Tags(**{extra: ''})
        return tags

    # handle the case for references to static sec groups and also when they don't exist.
    def __sec_groups_constructor(self):
        """Return the default security-group list: the monitoring and admin
        groups resolved via cross-stack ImportValue lookups from config."""
        default_sec_groups = []
        default_sec_groups.extend(
            (ImportValue(self.config['monitoring_security_group_export_name']),
             ImportValue(self.config['admin_security_group_export_name'])))
        return default_sec_groups

    def __tag_elb_role_generator(self):
        """Tags for an ELB: the first app's role plus the deployment env.

        NOTE: ``dict.values()[0]`` only works on Python 2 (this file also
        uses print statements / iteritems elsewhere).
        """
        role = self.config['apps'].values()[0].get("role", '')
        return Tags(Role=role, env=Ref("DeploymentEnvironment"))

    def __tag_as_role_generator(self, common_tags):
        """Build autoscaling-group tags: role keys, Name/Role, and the
        caller-supplied common tags, merged in that order.

        NOTE(review): ``extra_roles`` defaults to '' (a string); a non-empty
        string would be iterated character by character — confirm configs
        always supply a list here.
        """
        copy_common_tags = copy.copy(common_tags)
        role = self.config['apps'].values()[0].get("role", '')
        extra_roles = self.config['apps'].values()[0].get("extra_roles", '')
        extra_tags = AsTags(**{
            role: '',
        })
        if len(extra_roles) > 0:
            for i in extra_roles:
                extra_tags += AsTags(**{
                    i: '',
                })

        other_tags = AsTags(Name=Ref("AWS::StackName"), Role=role)
        all_tags = extra_tags + other_tags + copy_common_tags
        return all_tags

    @staticmethod
    def __loaduserdata(filename, as_name):
        data = []
        mydir = os.path.dirname(os.path.abspath(__file__))
        relative_path = os.path.join(mydir, '..', "data")
        try:
            with open(os.path.join(relative_path, filename), 'r') as f:
                for line in f:
                    if line.strip('\n\r ') == '':
                        continue
                    # some funkiness below. Needs to substitute AS group name, since in a case of multiple groups
                    # can't rely on a static value.
                    line = MyTemplate(line)
                    line = line.substitute({'as_name': as_name})
                    line = Sub(line)
                    data.append(line)
        except IOError:
            print "User data file could not be found"
            exit()
        return Base64(Join('', data))

    def process_config(self,
                       common_tags,
                       default_size=None,
                       alb_secgroups=None):
        """Create instances (plus optional ELB/ALB and DNS) for each app.

        Args:
            common_tags: Tags applied to every instance.
            default_size: instance type used when an app has no 'size'.
            alb_secgroups: ALB security groups; required when any app sets
                'alb' (the process exits otherwise).
        """
        # This function will be changed significantly once we start using the new_elb_
        # adder/target_adder function in favor of the classic ELB
        for k, v in self.config['apps'].iteritems():
            instance_list = []
            # one instance per requested count, indexed for unique IDs
            for index in xrange(v['count']):
                instance_list.append(
                    self.instance_adder(k, v.get('size',
                                                 default_size), v['ami_id'],
                                        v.get('role', k),
                                        v.get('extra_roles', []),
                                        index, common_tags,
                                        v.get('user_data', False)))
            if 'elb' in v:
                self.elb_adder(k, instance_list, Ref("Hostname"))
            if 'alb' in v:
                if alb_secgroups is None:
                    print "You need to pass a list of ALB Security Groups if alb is on"
                    quit()
                alb.Alb(self.template, self.config, instance_list,
                        alb_secgroups)
                self.dns_adder(v.get("dns", None), "ApplicationLoadBalancer")

    def description(self, description, version='2010-09-09'):
        """Set the template's description and its format version."""
        tpl = self.template
        tpl.add_description(description)
        tpl.add_version(version)

    def print_template(self):
        """Dump the rendered template JSON to stdout."""
        print(self.template.to_json())

    def instance_adder(self,
                       name,
                       size,
                       ami_id,
                       role,
                       extra_roles,
                       index,
                       common_tags,
                       user_data_file=False):
        """Add one EC2 instance for app *name* to the template; return it.

        The logical ID is the alphanumeric characters of *name* plus
        *index*; the instance also gets an optional DNS A record when the
        first app config sets 'dns_host'.
        """
        # security-group logical ID: "<name>group" with underscores removed
        # (str.translate(None, ...) is Python 2 only)
        instance_sec_group = name + "group"
        instance_sec_group = instance_sec_group.translate(None, '_')
        instance_id = ''.join(ch for ch in name if ch.isalnum()) + str(index)
        instance = None
        extra_tags = self.__tag_role_generator(role, extra_roles)
        userdata = ""
        instance_name = Join("", [Ref("AWS::StackName"), str(index)])
        app_config = self.config['apps'].values()[0]
        if user_data_file:
            # NOTE(review): __loaduserdata as defined below requires an
            # as_name argument — confirm this single-argument call succeeds.
            userdata = self.__loaduserdata(user_data_file)
        blockmap = self.__generate_blockmap()
        instance = self.template.add_resource(
            ec2.Instance(
                instance_id,
                ImageId=FindInMap("RegionMap", Ref("AWS::Region"), ami_id),
                KeyName=self.config['keyname'],
                Tags=common_tags + Tags(Name=instance_name, Role=role) +
                extra_tags,
                IamInstanceProfile=self.config['iam_role'],
                SecurityGroupIds=[
                    Ref(instance_sec_group),
                    ImportValue(
                        self.config['monitoring_security_group_export_name']),
                    ImportValue(
                        self.config['admin_security_group_export_name'])
                ],
                InstanceType=size,
                # spread instances across the configured subnets at random
                SubnetId=random.choice(self.config['subnets']),
                UserData=userdata,
                BlockDeviceMappings=blockmap))
        if 'dns_host' in app_config and app_config['dns_host']:
            self.host_dns_adder(instance_name, instance_id)
        return instance

    def dns_adder(self, dns, elbid):
        """Create a CNAME `<dns>.<BaseURL>.` pointing at ELB *elbid*."""
        record = RecordSetType(
            "DNS" + str(elbid),
            HostedZoneName=Join("", [Ref("BaseURL"), "."]),
            Name=Join("", [dns, ".", Ref("BaseURL"), "."]),
            Type="CNAME",
            TTL="900",
            ResourceRecords=[GetAtt(elbid, "DNSName")],
        )
        self.template.add_resource(record)

    def host_dns_adder(self, dns, instanceid):
        """Create an A record `<dns>.<BaseURL>.` for the instance's
        private IP."""
        record = RecordSetType(
            "DNS" + str(instanceid),
            HostedZoneName=Join("", [Ref("BaseURL"), "."]),
            Name=Join("", [dns, ".", Ref("BaseURL"), "."]),
            Type="A",
            TTL="600",
            ResourceRecords=[GetAtt(instanceid, "PrivateIp")
                             ],  # PrivateIP PrivateDnsName
        )
        self.template.add_resource(record)

    def elb_adder(self, name, instance_list, dns):
        """Create a classic ELB fronting *instance_list*; returns it.

        Listener/health-check settings come from the first app entry in
        config ('ports', 'type', 'elb_check_type', 'elb_check_path').
        Adds an optional DNS CNAME when *dns* is truthy.
        """
        ports = self.config['apps'].values()[0].get("ports", None)
        elb_type = self.config['apps'].values()[0].get("type",
                                                       'internet-facing')
        elb_check_type = self.config['apps'].values()[0].get(
            "elb_check_type", 'TCP')
        elb_check_path = self.config['apps'].values()[0].get(
            "elb_check_path", "")

        # HTTP(S) health checks need a path; default to the root
        if 'HTTP' in elb_check_type.upper() and not elb_check_path:
            elb_check_path = "/"

        if instance_list is None:
            instance_list = []
        elb_tags = self.__tag_elb_role_generator()
        # logical ID: alphanumeric characters of the app name
        elb_id = ''.join(ch for ch in name if ch.isalnum())
        # security-group ID: "<name>_group" with underscores removed (py2 translate)
        elb_sec_group = name + "_group"
        elb_sec_group = elb_sec_group.translate(None, '_')
        elasticlb = self.template.add_resource(
            elb.LoadBalancer(
                elb_id,
                Subnets=self.config['public_subnets'],
                Scheme=elb_type,
                # random suffix keeps the LB name unique across re-creations
                LoadBalancerName=Join(
                    "", [Ref("AWS::StackName"), '-',
                         random.randint(1, 999)]),
                SecurityGroups=[Ref(elb_sec_group)],
                LBCookieStickinessPolicy=[
                    elb.LBCookieStickinessPolicy(
                        PolicyName='LBCookeStickinessPolicy', )
                ],
                ConnectionDrainingPolicy=elb.ConnectionDrainingPolicy(
                    Enabled=True,
                    Timeout=300,
                ),
                CrossZone=True,
                Instances=[Ref(r) for r in instance_list],
                Tags=elb_tags))

        # 80 -> WebServerPort (HTTP) and 443 -> WebServerPort (HTTPS)
        elasticlb.Listeners = [
            elb.Listener(LoadBalancerPort="80",
                         InstancePort=Ref("WebServerPort"),
                         Protocol="HTTP",
                         PolicyNames=['LBCookeStickinessPolicy']),
            elb.Listener(
                LoadBalancerPort="443",
                InstancePort=Ref("WebServerPort"),
                Protocol="HTTPS",
                SSLCertificateId=Ref("CertName"),
                PolicyNames=['LBCookeStickinessPolicy'],
            )
        ]
        # e.g. "HTTP:80/" or "TCP:443" depending on check type/path
        elasticlb.HealthCheck = elb.HealthCheck(Target=Join(
            "", [elb_check_type.upper(), ":", ports, elb_check_path]),
                                                HealthyThreshold="3",
                                                UnhealthyThreshold="5",
                                                Interval="30",
                                                Timeout="5")
        if dns:
            self.dns_adder(dns, elb_id)
        return elasticlb

    def autoscaling_adder(self,
                          common_tags,
                          min_size,
                          max_size,
                          min_in_service,
                          image_id,
                          instance_size,
                          sec_groups,
                          health_check_type='EC2',
                          loadbalancer=False,
                          keyname=None,
                          targetgroup=False,
                          user_data_file=False):
        """Create a LaunchConfiguration + AutoScalingGroup pair.

        Logical IDs get a random numeric suffix so multiple groups can
        coexist in one template. Returns a namedtuple
        (as_group, launch_config).
        """
        lc_name = "LaunchConfiguration" + str(random.randint(1, 999))
        as_name = "AutoScalingGroup" + str(random.randint(1, 999))
        if keyname is None:
            keyname = self.config['keyname']
        # user data always needs the AS group name for substitution
        if user_data_file:
            userdata = self.__loaduserdata(user_data_file, as_name)
        else:
            userdata = self.__loaduserdata("default_userdata.txt", as_name)
        as_group_tags = self.__tag_as_role_generator(common_tags)
        blockmap = self.__generate_blockmap()
        # copy so the instance-level default list is not mutated
        lc_groups = copy.copy(self.sec_groups)
        lc_groups.append(Ref(sec_groups))
        launch_config = self.template.add_resource(
            LaunchConfiguration(lc_name,
                                ImageId=image_id,
                                KeyName=keyname,
                                InstanceType=instance_size,
                                SecurityGroups=lc_groups,
                                IamInstanceProfile=self.config['iam_role'],
                                UserData=userdata,
                                BlockDeviceMappings=blockmap))
        as_group = autoscalinggroup = AutoScalingGroup(
            as_name,
            Tags=as_group_tags,
            LaunchConfigurationName=Ref(lc_name),
            MinSize=Ref(min_size),
            MaxSize=Ref(max_size),
            VPCZoneIdentifier=self.config['subnets'],
            HealthCheckType=health_check_type,
            DependsOn=lc_name,
            # rolling creation: 80% of instances must signal success
            CreationPolicy=CreationPolicy(
                AutoScalingCreationPolicy=AutoScalingCreationPolicy(
                    MinSuccessfulInstancesPercent=80),
                ResourceSignal=ResourceSignal(Count=1, Timeout='PT10M')),
            UpdatePolicy=UpdatePolicy(
                AutoScalingReplacingUpdate=AutoScalingReplacingUpdate(
                    WillReplace=False, ),
                AutoScalingScheduledAction=AutoScalingScheduledAction(
                    IgnoreUnmodifiedGroupSizeProperties=True, ),
                AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                    MaxBatchSize="2",
                    MinInstancesInService=Ref(min_in_service),
                    MinSuccessfulInstancesPercent=80,
                    PauseTime='PT10M',
                    WaitOnResourceSignals=True,
                    SuspendProcesses=[
                        "ReplaceUnHealthy, AZRebalance, AlarmNotifications, "
                        "ScheduledActions, HealthCheck"
                    ])))
        if loadbalancer:
            autoscalinggroup.LoadBalancerNames = loadbalancer
        if targetgroup:
            autoscalinggroup.TargetGroupARNs = [targetgroup]
        self.template.add_resource(autoscalinggroup)
        # getting a litte funky below. Only reason is to be able to do overrides for k8s. Probably will need to be
        # revisited
        as_lc = collections.namedtuple('aslc',
                                       'as_group,launch_config')(as_group,
                                                                 launch_config)
        return as_lc

    def rds_adder(self,
                  instance_identifier,
                  allocated_storage,
                  db_subnet_group,
                  rds_group,
                  db_size,
                  db_name='MyDB',
                  storage_type='gp2',
                  engine_version='5.5.40a',
                  storage_engine='MySQL',
                  publicly_accessible=False):
        """Create an RDS DB instance plus a CNAME record for its endpoint.

        Credentials and encryption only apply when the
        'NotRestoringFromSnapshot' condition holds; otherwise the instance
        is restored from the RDSSnapshot parameter.
        """
        # DNS record logical ID, e.g. 'MYDBDNS' (''.join of the tuple)
        db_names = ''
        db_asf = (db_name.upper(), 'DNS')
        # CloudFormation expects string booleans here
        if publicly_accessible is False:
            publicly_accessible = "false"
        else:
            publicly_accessible = "true"

        dbinstance = rds.DBInstance(
            db_name,
            DBInstanceIdentifier=instance_identifier,
            Engine=storage_engine,
            EngineVersion=engine_version,
            # only set credentials when creating fresh (not from snapshot)
            MasterUsername=If("NotRestoringFromSnapshot", Ref("RDSDBUser"),
                              Ref("AWS::NoValue")),
            MasterUserPassword=If("NotRestoringFromSnapshot",
                                  Ref("RDSDBPassword"), Ref("AWS::NoValue")),
            AllocatedStorage=allocated_storage,
            DBSnapshotIdentifier=If("NotRestoringFromSnapshot",
                                    Ref("AWS::NoValue"), Ref("RDSSnapshot")),
            StorageType=storage_type,
            DBSubnetGroupName=db_subnet_group,
            PubliclyAccessible=publicly_accessible,
            VPCSecurityGroups=rds_group,
            DBInstanceClass=db_size,
            StorageEncrypted=If("NotRestoringFromSnapshot", True,
                                Ref("AWS::NoValue")))
        # friendly CNAME: <RDSDNSName>.<RDSDNSDomain> -> instance endpoint
        dbdnsrecord = RecordSetType(
            db_names.join(db_asf),
            HostedZoneName=Join("", [Ref("RDSDNSDomain"), "."]),
            Name=Join("", [Ref("RDSDNSName"), ".",
                           Ref("RDSDNSDomain"), "."]),
            Type="CNAME",
            TTL="900",
            ResourceRecords=[GetAtt(dbinstance, "Endpoint.Address")],
        )
        self.template.add_resource(dbinstance)
        self.template.add_resource(dbdnsrecord)

    # these are very similar functions below. One handles the general rules and another one handles self-refrencing ones.
    # this could be handled in one function, but would require some funky stuff around conditionals and use of AWS::NoValue.
    # it's a cleaner implementation this way.

    @staticmethod
    def rule_adder(fromport,
                   toport=None,
                   cidr='0.0.0.0/0',
                   protocol='TCP',
                   sourcegroupid=Ref("AWS::NoValue")):
        """Return a SecurityGroupRule; toport defaults to fromport."""
        if toport is None:
            toport = fromport
        return ec2.SecurityGroupRule(IpProtocol=protocol,
                                     FromPort=fromport,
                                     ToPort=toport,
                                     CidrIp=cidr,
                                     SourceSecurityGroupId=sourcegroupid)

    def rule_adder_self(self, group, fromport, toport=None, protocol='TCP'):
        """Add a self-referencing ingress rule (source == target group).

        NOTE: the logical ID is always the literal "IngressRule", so only
        one such rule can exist per template.
        """
        if toport is None:
            toport = fromport
        self.template.add_resource(
            ec2.SecurityGroupIngress("IngressRule",
                                     IpProtocol=protocol,
                                     FromPort=fromport,
                                     ToPort=toport,
                                     SourceSecurityGroupId=Ref(group),
                                     GroupId=Ref(group)))

    def group_adder(self, group_name, rules, description=None):
        """Create a security group *group_name* with the given ingress
        rules in the configured VPC."""
        if description is None:
            description = "Security Group for {0} Access".format(group_name)
        self.template.add_resource(
            ec2.SecurityGroup(group_name,
                              GroupDescription=description,
                              SecurityGroupIngress=rules,
                              VpcId=self.config['vpcid'],
                              Tags=Tags(Name=group_name)))

    def redis_adder(self,
                    name,
                    tags,
                    instance_type='cache.m3.medium',
                    nodes=1,
                    version='2.8.24'):
        """Create an ElastiCache Redis cluster with its security group,
        subnet group, and a CNAME record for the Redis endpoint."""
        # allow Redis (6379) from the 10.0.0.0/16 range only
        rule = self.rule_adder(6379, cidr='10.0.0.0/16')
        subnetname = Join("", [name, Ref("DeploymentEnvironment")])
        self.group_adder("redissg", [rule])
        subnetgroup = self.template.add_resource(
            elasticache.SubnetGroup(
                "SubnetGroup",
                CacheSubnetGroupName=subnetname,
                Description='Subnet Group for ElasticCache Redis {0}'.format(
                    name),
                SubnetIds=self.config['subnets']))
        self.template.add_resource(
            elasticache.CacheCluster(
                "CacheCluster",
                ClusterName=name,
                Engine='redis',
                EngineVersion=version,
                CacheNodeType=instance_type,
                NumCacheNodes=nodes,
                Tags=tags,
                CacheSubnetGroupName=Ref(subnetgroup),
                VpcSecurityGroupIds=[GetAtt('redissg', "GroupId")],
            ))
        # friendly CNAME: <RedisDNSName>.<RedisDNSDomain> -> cluster endpoint
        redisdnsrecord = RecordSetType(
            "RedisDNSRecord",
            HostedZoneName=Join("", [Ref("RedisDNSDomain"), "."]),
            Name=Join("",
                      [Ref("RedisDNSName"), ".",
                       Ref("RedisDNSDomain"), "."]),
            Type="CNAME",
            TTL="900",
            ResourceRecords=[GetAtt("CacheCluster", "RedisEndpoint.Address")],
        )
        self.template.add_resource(redisdnsrecord)

    def redis_adder_replcation(self,
                               name,
                               tags,
                               instance_type='cache.m3.medium',
                               cache_clusters=2,
                               version='3.2.4'):
        """Create an ElastiCache Redis *replication group* (vs. the single
        cluster in redis_adder) plus security/subnet groups and a CNAME to
        the primary endpoint.

        NOTE: the method name's typo ("replcation") is kept for caller
        compatibility.
        """
        # allow Redis (6379) from the 10.0.0.0/16 range only
        rule = self.rule_adder(6379, cidr='10.0.0.0/16')
        subnetname = Join("", [name, Ref("DeploymentEnvironment")])
        self.group_adder("redissg", [rule])
        subnetgroup = self.template.add_resource(
            elasticache.SubnetGroup(
                "SubnetGroup",
                CacheSubnetGroupName=subnetname,
                Description='Subnet Group for ElasticCache Redis {0}'.format(
                    name),
                SubnetIds=self.config['subnets']))
        self.template.add_resource(
            elasticache.ReplicationGroup(
                'RedisReplicationGroup',
                ReplicationGroupId=name,
                Engine='redis',
                EngineVersion=version,
                CacheNodeType=instance_type,
                NumCacheClusters=cache_clusters,
                Tags=tags,
                CacheSubnetGroupName=Ref(subnetgroup),
                ReplicationGroupDescription="%s replication group" % name,
                SecurityGroupIds=[GetAtt('redissg', "GroupId")],
            ))
        # friendly CNAME pointing at the replication group's primary endpoint
        redisdnsrecord = RecordSetType(
            "RedisDNSRecord",
            HostedZoneName=Join("", [Ref("RedisDNSDomain"), "."]),
            Name=Join("",
                      [Ref("RedisDNSName"), ".",
                       Ref("RedisDNSDomain"), "."]),
            Type="CNAME",
            TTL="900",
            ResourceRecords=[
                GetAtt("RedisReplicationGroup", "PrimaryEndPoint.Address")
            ],
        )
        self.template.add_resource(redisdnsrecord)

    def elasticsearch_cluster(self, name, ebs=True, voltype='gp2'):
        """Create a 2-node, zone-aware Elasticsearch domain with dedicated
        masters; returns the Domain resource.

        NOTE(review): the access policy allows Principal '*' on es:* for
        all resources — confirm this wide-open policy is intentional.
        """
        es_domain = self.template.add_resource(
            Domain(
                name,
                DomainName=name + 'domain',
                ElasticsearchClusterConfig=ElasticsearchClusterConfig(
                    DedicatedMasterEnabled=True,
                    InstanceCount=2,
                    ZoneAwarenessEnabled=True,
                    InstanceType=constants.ELASTICSEARCH_M3_MEDIUM,
                    DedicatedMasterType=constants.ELASTICSEARCH_M3_MEDIUM,
                    DedicatedMasterCount=3),
                EBSOptions=EBSOptions(EBSEnabled=ebs,
                                      Iops=0,
                                      VolumeSize=20,
                                      VolumeType=voltype),
                SnapshotOptions=SnapshotOptions(AutomatedSnapshotStartHour=0),
                AccessPolicies={
                    'Version':
                    '2012-10-17',
                    'Statement': [{
                        'Effect': 'Allow',
                        'Principal': {
                            'AWS': '*'
                        },
                        'Action': 'es:*',
                        'Resource': '*'
                    }]
                },
                AdvancedOptions={
                    "rest.action.multi.allow_explicit_index": "true"
                }))
        return es_domain

    def glue_db_adder(self, name):
        """Register a Glue catalog database called ``name`` in this account."""
        database = glue.Database(name,
                                 CatalogId=Ref("AWS::AccountId"),
                                 DatabaseInput=glue.DatabaseInput(Name=name))
        self.template.add_resource(database)

    def glue_table_adder(self, name, db, props):
        """Add a Glue table ``name`` to the catalog database ``db``.

        ``props`` is a nested dict: an optional "params" key supplies table
        parameters and an optional "descriptor" section carries the input /
        output formats, data location, SerDe settings and column list.
        Any missing descriptor field falls back to AWS::NoValue.
        """
        # CloudFormation logical ids must be alphanumeric, so strip every
        # other character from the table name before appending the db name.
        unique_id = re.sub('[^A-Za-z]', '', name) + db
        table = glue.Table(
            unique_id,
            DependsOn=db,
            CatalogId=Ref("AWS::AccountId"),
            DatabaseName=db,
            TableInput=glue.TableInput(
                Name=name,
                Parameters=helpers.dictConvert(props.get("params", None)),
                StorageDescriptor=glue.StorageDescriptor(
                    InputFormat=props.get("descriptor",
                                          {}).get("inputFormat",
                                                  Ref("AWS::NoValue")),
                    OutputFormat=props.get("descriptor",
                                           {}).get("outputFormat",
                                                   Ref("AWS::NoValue")),
                    Location=props.get("descriptor",
                                       {}).get("location",
                                               Ref("AWS::NoValue")),
                    SerdeInfo=glue.SerdeInfo(
                        SerializationLibrary=props.get("descriptor", {}).get(
                            "SerDe", {}).get("serialization",
                                             Ref("AWS::NoValue")),
                        Parameters=helpers.dictConvert(
                            props.get("descriptor",
                                      {}).get("SerDe",
                                              {}).get("parameters", None))))))
        # NOTE(review): dictConvert appears to normalise config mappings and
        # may return either a dict or a list -- confirm against helpers.
        # When it is a list, each entry is coerced into a glue.Column.
        columns = helpers.dictConvert((props.get("descriptor",
                                                 {}).get("columns", {})))
        if isinstance(columns, list):
            for index, item in enumerate(columns):
                column = glue.Column(**item)
                columns[index] = column

        table.TableInput.StorageDescriptor.Columns = columns
        self.template.add_resource(table)

    def kinesis_adder(self, name, shards):
        """Add a Kinesis stream with ``shards`` shards and export its name
        through the "kinesisStreamName" stack output.
        """
        stream = kinesis.Stream(name, ShardCount=shards)
        self.template.add_resource(stream)
        self.template.add_output(
            [Output("kinesisStreamName", Value=Ref(stream))])

    def cloudtrail_adder(self,
                         name,
                         bucket,
                         cw_group=Ref("AWS::NoValue"),
                         cw_role_arn=Ref("AWS::NoValue")):
        """Create a logging CloudTrail that delivers to S3 ``bucket``.

        Objects are key-prefixed with the account id.  CloudWatch Logs
        delivery only happens when both ``cw_group`` and ``cw_role_arn``
        are supplied; otherwise they resolve to AWS::NoValue.
        Returns the Trail resource.
        """
        trail = cloudtrail.Trail(
            name,
            TrailName=name,
            IsLogging=True,
            S3BucketName=bucket,
            S3KeyPrefix=Ref("AWS::AccountId"),
            CloudWatchLogsLogGroupArn=cw_group,
            CloudWatchLogsRoleArn=cw_role_arn,
        )
        return self.template.add_resource(trail)

    def s3_policy_adder(self, name, bucket, policy):
        """Attach the policy document ``policy`` to S3 bucket ``bucket``."""
        bucket_policy = s3.BucketPolicy(name,
                                        Bucket=bucket,
                                        PolicyDocument=policy)
        self.template.add_resource(bucket_policy)

    def cloudwatch_log_adder(self, name, metric_filter=None, lambda_name=None):
        """Create a CloudWatch Logs group called ``name``.

        When ``metric_filter`` is given, a metric filter is attached that
        emits metric ``name`` (value "1") in namespace ``name`` for each
        matching event.  When ``lambda_name`` is given, a subscription
        filter streams matching events to that Lambda function.
        Returns the LogGroup resource.
        """
        log_group = logs.LogGroup(name, LogGroupName=name)
        if metric_filter:
            self.template.add_resource(
                logs.MetricFilter(name + "filter",
                                  DependsOn=name,
                                  LogGroupName=name,
                                  FilterPattern=metric_filter,
                                  MetricTransformations=[
                                      logs.MetricTransformation(
                                          name + "transform",
                                          MetricName=name,
                                          MetricNamespace=name,
                                          MetricValue="1")
                                  ]))
        if lambda_name:
            self.template.add_resource(
                logs.SubscriptionFilter(name + "subscribe",
                                        DependsOn=name,
                                        LogGroupName=name,
                                        DestinationArn=GetAtt(
                                            lambda_name, "Arn"),
                                        # Bug fix: FilterPattern is a required
                                        # string; previously None was passed
                                        # when no metric_filter was supplied.
                                        # An empty pattern matches everything.
                                        FilterPattern=metric_filter or ""))
        self.template.add_resource(log_group)
        return log_group

    def lambda_adder(self, nameref, role, condition, **kwargs):
        """Add a Lambda function whose code is fetched from S3.

        Required kwargs: s3_bucket, s3_key, memory, handler, timeout,
        name, runtime.  When ``condition`` evaluates true the S3 object
        version is omitted, otherwise the "LambdaVersion" template
        parameter pins it.  Returns the Function resource, or None when
        a required kwarg is missing.
        """
        try:
            lambda_func = awslambda.Function(
                nameref,
                DependsOn=role,
                Code=awslambda.Code(S3Bucket=kwargs['s3_bucket'],
                                    S3Key=kwargs['s3_key'],
                                    S3ObjectVersion=If(condition,
                                                       Ref("AWS::NoValue"),
                                                       Ref("LambdaVersion"))),
                MemorySize=kwargs['memory'],
                Role=GetAtt(role, "Arn"),
                Handler=kwargs['handler'],
                Timeout=kwargs['timeout'],
                FunctionName=kwargs['name'],
                Runtime=kwargs['runtime'])
        except KeyError as e:
            # Bug fix: only a missing kwarg is the anticipated failure here;
            # the old `except Exception` silently swallowed everything, and
            # the py2-only `print e` statements broke on Python 3.
            print(e)
            print("have you set all the values in your config file?")
            return None
        self.template.add_resource(lambda_func)
        return lambda_func

    def lambda_policy_adder(self, name, principal):
        """Grant ``principal`` permission to invoke the Lambda ``name``."""
        permission = awslambda.Permission("LambdaPermissionPolicy",
                                          DependsOn=name,
                                          Action="lambda:InvokeFunction",
                                          FunctionName=name,
                                          Principal=principal)
        self.template.add_resource(permission)

    def iam_adder(self, name, managed_policies, role_policy):
        """Create and return an IAM role named ``name``.

        ``role_policy`` is the assume-role (trust) document and
        ``managed_policies`` a list of managed-policy ARNs to attach.
        """
        return self.template.add_resource(
            iam.Role(name,
                     AssumeRolePolicyDocument=role_policy,
                     RoleName=name,
                     ManagedPolicyArns=managed_policies))

    def iam_policy_adder(self, name, policy):
        """Create and return a customer-managed IAM policy named ``name``
        wrapping the ``policy`` document.
        """
        managed_policy = iam.ManagedPolicy(name,
                                           ManagedPolicyName=name,
                                           PolicyDocument=policy)
        return self.template.add_resource(managed_policy)

    def cloudfront_adder(self, static_site=True):
        """Add a CloudFront distribution fronting an S3 bucket.

        With ``static_site`` the S3 website endpoint is used as an
        http-only custom origin; otherwise the plain REST endpoint with an
        S3 origin config.  Exports the distribution id and its http://
        domain name as stack outputs.
        """
        origin_id = Join("", ["S3-", Ref("S3Name"), Ref("Path")])
        if static_site is True:
            # NOTE(review): the website endpoint is hard-coded to us-east-1.
            website_domain = Join(
                "",
                [Ref("S3Name"), ".s3-website-us-east-1.amazonaws.com"])
            origin = Origin(Id=origin_id,
                            DomainName=website_domain,
                            OriginPath=Ref("Path"),
                            CustomOriginConfig=CustomOriginConfig(
                                OriginProtocolPolicy="http-only"))
        else:
            rest_domain = Join("", [Ref("S3Name"), ".s3.amazonaws.com"])
            origin = Origin(Id=origin_id,
                            DomainName=rest_domain,
                            OriginPath=Ref("Path"),
                            S3OriginConfig=S3Origin())
        # Cache for a day by default, always redirect viewers to HTTPS.
        cache_behavior = DefaultCacheBehavior(
            TargetOriginId=origin_id,
            ForwardedValues=ForwardedValues(QueryString=False),
            ViewerProtocolPolicy="redirect-to-https",
            MinTTL=3600,
            DefaultTTL=86400,
            MaxTTL=31536000)
        dist_config = DistributionConfig(
            Origins=[origin],
            DefaultCacheBehavior=cache_behavior,
            ViewerCertificate=ViewerCertificate(
                AcmCertificateArn=Ref("ACMarn"),
                SslSupportMethod='sni-only'),
            Aliases=Ref("URLs"),
            DefaultRootObject=Ref("rootObject"),
            Enabled=True,
            HttpVersion='http2')
        myDistribution = self.template.add_resource(
            Distribution("myDistribution", DistributionConfig=dist_config))

        self.template.add_output([
            Output("DistributionId", Value=Ref(myDistribution)),
            Output("DistributionName",
                   Value=Join(
                       "", ["http://",
                            GetAtt(myDistribution, "DomainName")])),
        ])

    def __generate_blockmap(self, blockmap=None):
        """Build the EC2 block-device mappings for an instance.

        The root volume (/dev/sda1, gp2) is sized by the "RootVolSize"
        template parameter.  One extra mapping is appended for each
        'mounts' entry of the first app config, sized by its
        "<name>VolSize" parameter and optionally restored from the
        "<name>SnapID" snapshot (see disk_parameters).

        Note: the ``blockmap`` argument has never had any effect -- the
        original code overwrote it unconditionally -- it is kept only for
        call compatibility.
        """
        blockmap = [
            ec2.BlockDeviceMapping(DeviceName="/dev/sda1",
                                   Ebs=ec2.EBSBlockDevice(
                                       VolumeSize=Ref("RootVolSize"),
                                       VolumeType="gp2")),
        ]
        app_config = self.config['apps'].values()[0]
        if 'mounts' in app_config:
            for mount in app_config['mounts']:
                blockmap.append(
                    ec2.BlockDeviceMapping(
                        DeviceName=mount['path'],
                        Ebs=ec2.EBSBlockDevice(
                            VolumeSize=Ref("{}VolSize".format(mount['name'])),
                            # Attach the snapshot only when the restore
                            # condition is false (i.e. a SnapID was given).
                            SnapshotId=If(
                                "{}NotRestoringFromSnapshot".format(
                                    mount['name']), Ref("AWS::NoValue"),
                                Ref("{}SnapID".format(mount['name']))),
                            VolumeType=mount.get('type', 'standard'),
                            DeleteOnTermination=True)))
        return blockmap

    def disk_parameters(self):
        """Declare per-mount volume parameters and restore conditions.

        For every 'mounts' entry of the first app config this adds a
        "<name>VolSize" parameter (defaulting to the configured size), a
        "<name>SnapID" parameter, and a "<name>NotRestoringFromSnapshot"
        condition that is true when no snapshot id was supplied.
        """
        app_config = self.config['apps'].values()[0]
        if 'mounts' not in app_config:
            return
        # If you want to be able to override Volume size set default to
        # size of snapshot.
        for mount in app_config['mounts']:
            vol_name = mount['name']
            self.template.add_parameter(
                Parameter("{}VolSize".format(vol_name),
                          Type="String",
                          Default=mount['size'],
                          Description="{} EBS Volume size.".format(vol_name)))
            self.template.add_parameter(
                Parameter(
                    "{}SnapID".format(vol_name),
                    Type="String",
                    Default=mount.get("SnapshotId", ""),
                    Description=
                    "{} EBS Volume Snapshot(Leave Blank to use new Volume).".
                    format(vol_name)))
            self.template.add_condition(
                "{}NotRestoringFromSnapshot".format(vol_name),
                Equals(Ref("{}SnapID".format(vol_name)), ""))
Exemple #15
0
class BaseTemplate(object):
    """Declarative base for CloudFormation stacks.

    Subclasses describe the stack through class attributes (description,
    parameters, conditions, outputs, plus troposphere resource objects);
    ``render`` walks those attributes and assembles the template JSON.
    """

    def __init__(self):
        self._template = Template()

    def add_description(self):
        # ``description`` is supplied by the subclass.
        self._template.add_description(self.description)

    def add_parameters(self):
        # ``parameters`` maps logical id -> Parameter keyword arguments.
        for logical_id, spec in self.parameters.items():
            self._template.add_parameter(Parameter(logical_id, **spec))

    def add_conditions(self):
        for logical_id, expr in self.conditions.items():
            self._template.add_condition(logical_id, expr)

    def add_mappings(self):
        for logical_id, mapping in self.mappings.items():
            self._template.add_mapping(logical_id, mapping)

    def add_outputs(self):
        # ``outputs`` maps logical id -> Output keyword arguments.
        for logical_id, spec in self.outputs.items():
            self._template.add_output(Output(logical_id, **spec))

    def add_resource(self, attr_name):
        """Add the attribute named ``attr_name`` if it is a troposphere
        object, or a list made up entirely of troposphere objects."""
        attr = getattr(self, attr_name)
        if isinstance(attr, BaseAWSObject):
            self._template.add_resource(attr)
        elif isinstance(attr, list) and self.verify_list_of_resources(attr):
            for resource in attr:
                self._template.add_resource(resource)

    def verify_list_of_resources(self, attr):
        # True only when every element is a troposphere object.
        return all(isinstance(item, BaseAWSObject) for item in attr)

    def render(self, mappings):
        """Populate the template from class attributes and return its JSON."""
        self.mappings = mappings or {}
        self.add_mappings()

        # For each declared attribute prefer a dedicated add_<attr> method
        # (description, parameters, ...); everything else is treated as a
        # resource.
        for attr in get_class_attrs(self.__class__):
            handler = getattr(self, 'add_%s' % attr, None)
            if handler is not None:
                handler()
            else:
                self.add_resource(attr)

        return self._template.to_json()
def main(args):
    """Emit a CloudFormation template that conditionally creates up to five
    EBS volumes for a RAID set and write its JSON to ``args.target_path``.

    All settings arrive in the 8-field comma-delimited "RAIDOptions"
    parameter; "NONE" fields disable the corresponding feature.
    """
    number_of_vol = 5

    t = Template()
    availability_zone = t.add_parameter(
        Parameter(
            "AvailabilityZone",
            Type="String",
            Description="Availability Zone the cluster will launch into. " "THIS IS REQUIRED",
        )
    )
    raid_options = t.add_parameter(
        Parameter(
            "RAIDOptions",
            Type="CommaDelimitedList",
            Description="Comma separated list of RAID related options, "
            "8 parameters in total, "
            "["
            "0 shared_dir,"
            "1 raid_type,"
            "2 num_of_vols,"
            "3 vol_type,"
            "4 vol_size,"
            "5 vol_IOPS,"
            "6 encrypted, "
            "7 ebs_kms_key]",
        )
    )
    use_vol = [None] * number_of_vol
    v = [None] * number_of_vol

    for i in range(number_of_vol):
        # Volume 1 exists whenever a shared_dir is configured; each further
        # volume additionally requires num_of_vols to exceed its index, so
        # the UseVolN conditions form a chain.
        if i == 0:
            use_vol[i] = t.add_condition("UseVol%s" % (i + 1), Not(Equals(Select("0", Ref(raid_options)), "NONE")))
        else:
            use_vol[i] = t.add_condition(
                "UseVol%s" % (i + 1),
                And(Not(Equals(Select("2", Ref(raid_options)), str(i))), Condition(use_vol[i - 1])),
            )

        # Per-volume feature switches derived from the RAIDOptions fields.
        use_ebs_iops = t.add_condition("Vol%s_UseEBSPIOPS" % (i + 1), Equals(Select("3", Ref(raid_options)), "io1"))
        use_volume_size = t.add_condition(
            "Vol%s_UseVolumeSize" % (i + 1), Not(Equals(Select("4", Ref(raid_options)), "NONE"))
        )
        use_volume_type = t.add_condition(
            "Vol%s_UseVolumeType" % (i + 1), Not(Equals(Select("3", Ref(raid_options)), "NONE"))
        )
        use_ebs_encryption = t.add_condition(
            "Vol%s_UseEBSEncryption" % (i + 1), Equals(Select("6", Ref(raid_options)), "true")
        )
        use_ebs_kms_key = t.add_condition(
            "Vol%s_UseEBSKMSKey" % (i + 1),
            And(Condition(use_ebs_encryption), Not(Equals(Select("7", Ref(raid_options)), "NONE"))),
        )
        # Defaults when unset: 20 GB gp2, unencrypted, no IOPS/KMS key.
        v[i] = t.add_resource(
            ec2.Volume(
                "Volume%s" % (i + 1),
                AvailabilityZone=Ref(availability_zone),
                VolumeType=If(use_volume_type, Select("3", Ref(raid_options)), "gp2"),
                Size=If(use_volume_size, Select("4", Ref(raid_options)), 20),
                Iops=If(use_ebs_iops, Select("5", Ref(raid_options)), NoValue),
                Encrypted=If(use_ebs_encryption, Select("6", Ref(raid_options)), NoValue),
                KmsKeyId=If(use_ebs_kms_key, Select("7", Ref(raid_options)), NoValue),
                Condition=use_vol[i],
            )
        )

    # Output the comma-joined ids of exactly the volumes that were created;
    # each UseVolN falls back to the previous prefix, or "NONE" when none.
    outputs = [None] * number_of_vol
    vol_to_return = [None] * number_of_vol
    for i in range(number_of_vol):
        vol_to_return[i] = Ref(v[i])
        if i == 0:
            outputs[i] = If(use_vol[i], vol_to_return[i], "NONE")
        else:
            outputs[i] = If(use_vol[i], Join(",", vol_to_return[: (i + 1)]), outputs[i - 1])

    t.add_output(
        Output("Volumeids", Description="Volume IDs of the resulted RAID EBS volumes", Value=outputs[number_of_vol - 1])
    )

    json_file_path = args.target_path
    output_file = open(json_file_path, "w")
    output_file.write(t.to_json())
    output_file.close()
    ConstraintDescription="Must be yes or no only.",
    Type="String",
    Description="Use EBS Volumes for the Master Node",
    AllowedValues=["yes", "no"],
))

# Whether the worker nodes should attach EBS volumes ("yes"/"no").
WorkerUseEBS = t.add_parameter(Parameter(
    "WorkerUseEBS",
    Default="no",
    ConstraintDescription="Must be yes or no only.",
    Type="String",
    Description="Use EBS Volumes for the Worker Node",
    AllowedValues=["yes", "no"],
))

# Boolean template conditions derived from the yes/no parameters above.
MasterUseEBSBool = t.add_condition("MasterUseEBSBool", Equals(Ref("MasterUseEBS"),"yes"))
WorkerUseEBSBool = t.add_condition("WorkerUseEBSBool", Equals(Ref("WorkerUseEBS"),"yes"))

# CIDR blocks for the VPC and its public subnet.
t.add_mapping("SubnetConfig",
    {'Public': {'CIDR': '10.0.0.0/24'}, 'VPC': {'CIDR': '10.0.0.0/16'}}
)

t.add_mapping("RHEL66",
    {'ap-northeast-1': {'AMI': 'ami-a15666a0'},
     'ap-southeast-1': {'AMI': 'ami-3813326a'},
     'ap-southeast-2': {'AMI': 'ami-55e38e6f'},
     'eu-west-1': {'AMI': 'ami-9cfd53eb'},
     'sa-east-1': {'AMI': 'ami-995ce884'},
     'us-east-1': {'AMI': 'ami-aed06ac6'},
     'us-west-1': {'AMI': 'ami-69ccd92c'},
     'us-west-2': {'AMI': 'ami-5fbcf36f'}}
Exemple #18
0
def generate_cloudformation_template():
    """Build the Auto Scaling Group template for the app.

    Reads from sys.argv (in order): whether to create an ELB ("True"),
    a literal-eval'd list of scaling-policy dicts, a list of alarm dicts,
    and a list of ELB listener dicts.  When the ELB is enabled the
    template also creates a weighted Route53 CNAME pointing at it.
    Returns the troposphere Template.
    """
    enable_elb = sys.argv[1]
    input_scaling_policies = ast.literal_eval(sys.argv[2])
    input_alarms = ast.literal_eval(sys.argv[3])

    enable_elb = enable_elb == 'True'
    elb_listeners = ast.literal_eval(sys.argv[4])

    template = Template()

    template.add_description("""\
    Configures Auto Scaling Group for the app""")

    # --- Core ASG parameters -------------------------------------------
    project_name = template.add_parameter(Parameter(
        "Name",
        Type="String",
        Description="Instances will be tagged with this name",
    ))

    scalecapacity = template.add_parameter(Parameter(
        "ScaleCapacity",
        Default="1",
        Type="String",
        Description="Number of api servers to run",
    ))

    minsize = template.add_parameter(Parameter(
        "MinScale",
        Type="String",
        Description="Minimum number of servers to keep in the ASG",
    ))

    maxsize = template.add_parameter(Parameter(
        "MaxScale",
        Type="String",
        Description="Maximum number of servers to keep in the ASG",
    ))

    signalcount = template.add_parameter(Parameter(
        "SignalCount",
        Default="1",
        Type="String",
        Description="No. of signals CF must receive before it sets the status as CREATE_COMPLETE",
    ))

    signaltimeout = template.add_parameter(Parameter(
        "SignalTimeout",
        Default="PT5M",
        Type="String",
        Description="Time that CF waits for the number of signals that was specified in Count ",
    ))

    minsuccessfulinstancespercent = template.add_parameter(Parameter(
        "MinSuccessfulInstancesPercent",
        Default="100",
        Type="String",
        Description="% instances in a rolling update that must signal success for CF to succeed",
    ))

    environment = template.add_parameter(Parameter(
        "Environment",
        Type="String",
        Description="The environment being deployed into",
    ))

    subnet = template.add_parameter(Parameter(
        "Subnets",
        Type="CommaDelimitedList",
    ))

    launchconfigurationname = template.add_parameter(Parameter(
        "LaunchConfigurationName",
        Type="String",
    ))

    health_check_grace_period = template.add_parameter(Parameter(
        "HealthCheckGracePeriod",
        Type="String",
        Default="300",
    ))

    # --- Optional ELB, its health check, and the Route53 record ---------
    if enable_elb:
        elb_subnets = template.add_parameter(Parameter(
            "LoadBalancerSubnets",
            Type="CommaDelimitedList",
        ))

        elb_bucket_name = template.add_parameter(Parameter(
            "LoadBalancerBucketName",
            Type="String",
            Description="S3 Bucket for the ELB access logs"
        ))

        # Access logging is only enabled when a bucket name was supplied.
        template.add_condition("ElbLoggingCondition", Not(Equals(Ref(elb_bucket_name), "")))

        elb_schema = template.add_parameter(Parameter(
            "LoadBalancerSchema",
            Type="String",
        ))

        health_check_interval = template.add_parameter(Parameter(
            "LoadBalancerHealthCheckInterval",
            Type="String",
        ))

        health_check_timeout = template.add_parameter(Parameter(
            "LoadBalancerHealthCheckTimeout",
            Type="String",
        ))

        healthy_threshold = template.add_parameter(Parameter(
            "LoadBalancerHealthyThreshold",
            Type="String",
        ))

        unhealthy_threshold = template.add_parameter(Parameter(
            "LoadBalancerUnHealthyThreshold",
            Type="String",
        ))

        enable_connection_draining = template.add_parameter(Parameter(
            "LoadBalancerEnableConnectionDraining",
            Type="String",
            Default="True",
        ))

        connection_draining_timeout = template.add_parameter(Parameter(
            "LoadBalancerConnectionDrainingTimeout",
            Type="String",
            Default="30",
        ))

        loadbalancersecuritygroup = template.add_parameter(Parameter(
            "LoadBalancerSecurityGroup",
            Type="CommaDelimitedList",
            Description="Security group for api app load balancer.",
        ))

        hostedzone = template.add_parameter(Parameter(
            "HostedZoneName",
            Description="The DNS name of an existing Amazon Route 53 hosted zone",
            Type="String",
        ))

        dns_record = template.add_parameter(Parameter(
            "DNSRecord",
            Type="String",
        ))

        dns_ttl = template.add_parameter(Parameter(
            "DNSTTL",
            Default="300",
            Type="String",
        ))

        new_weight = template.add_parameter(Parameter(
            "NewDnsWeight",
            Type="String",
            Default="100",
        ))

        health_check_protocol = template.add_parameter(Parameter(
            "LoadBalancerHealthCheckProtocol",
            Type="String",
        ))

        # TCP health checks take no URL path (see the health-check Target).
        template.add_condition("ElbTCPProtocolCondition", Equals(Ref(health_check_protocol), "TCP"))

        health_check_port = template.add_parameter(Parameter(
            "LoadBalancerHealthCheckPort",
            Type="String",
        ))

        health_check_path = template.add_parameter(Parameter(
            "LoadBalancerHealthCheckPath",
            Type="String",
        ))

        # One Listener per configured entry; the SSL certificate id is only
        # attached when that listener's protocol is literally "https".
        load_balancer_listeners = []
        for index, listener in enumerate(elb_listeners):
            template.add_condition("SSLCertificateCondition" + str(index), Equals(listener['protocol'], "https"))
            load_balancer_listeners.append(elb.Listener(
                LoadBalancerPort=listener['load_balancer_port'],
                InstancePort=listener['instance_port'],
                Protocol=listener['protocol'],
                InstanceProtocol=Ref(health_check_protocol),
                SSLCertificateId=If("SSLCertificateCondition" + str(index),
                                    listener.get('ssl_certificate_id', ''),
                                    Ref("AWS::NoValue")),
            ))

        loadbalancer = template.add_resource(elb.LoadBalancer(
            "LoadBalancer",
            AccessLoggingPolicy=If("ElbLoggingCondition",
                                   elb.AccessLoggingPolicy(
                                       EmitInterval=60,
                                       Enabled=True,
                                       S3BucketName=Ref(elb_bucket_name),
                                       S3BucketPrefix="ELBLogs"),
                                   Ref("AWS::NoValue")),
            ConnectionDrainingPolicy=elb.ConnectionDrainingPolicy(
                Enabled=Ref(enable_connection_draining),
                Timeout=Ref(connection_draining_timeout),
            ),
            Subnets=Ref(elb_subnets),
            HealthCheck=elb.HealthCheck(
                Target=Join("", [Ref(health_check_protocol), ":", Ref(health_check_port), If("ElbTCPProtocolCondition",
                                                                                             Ref("AWS::NoValue"),
                                                                                             Ref(health_check_path))
                                 ]),
                HealthyThreshold=Ref(healthy_threshold),
                UnhealthyThreshold=Ref(unhealthy_threshold),
                Interval=Ref(health_check_interval),
                Timeout=Ref(health_check_timeout),
            ),
            Listeners=load_balancer_listeners,
            CrossZone=True,
            SecurityGroups=Ref(loadbalancersecuritygroup),
            Scheme=Ref(elb_schema)
        ))

        # Weighted CNAME so traffic can be shifted between deployments.
        route53record = template.add_resource(RecordSetType(
            "DNS",
            HostedZoneName=Join("", [Ref(hostedzone), "."]),
            Name=Join("", [Ref(dns_record), ".", Ref(hostedzone), "."]),
            ResourceRecords=[GetAtt(loadbalancer, "DNSName")],
            SetIdentifier=Ref(project_name),
            TTL=Ref(dns_ttl),
            Type="CNAME",
            Weight=Ref(new_weight),
        ))

    # --- The Auto Scaling Group itself ----------------------------------
    autoscalinggroup = template.add_resource(AutoScalingGroup(
        "AutoscalingGroup",
        Tags=[
            Tag("Name", Ref(project_name), True),
            Tag("Environment", Ref(environment), True)
        ],
        LaunchConfigurationName=Ref(launchconfigurationname),
        MinSize=Ref(minsize),
        MaxSize=Ref(maxsize),
        DesiredCapacity=Ref(scalecapacity),
        VPCZoneIdentifier=Ref(subnet),
        HealthCheckGracePeriod=Ref(health_check_grace_period),
        CreationPolicy=CreationPolicy(
            ResourceSignal=ResourceSignal(
                Count=Ref(signalcount),
                Timeout=Ref(signaltimeout)
            ),
            AutoScalingCreationPolicy=AutoScalingCreationPolicy(
                MinSuccessfulInstancesPercent=Ref(minsuccessfulinstancespercent)
            )
        ),
        UpdatePolicy=UpdatePolicy(
            AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                MaxBatchSize='1',
                MinInstancesInService='1',
                MinSuccessfulInstancesPercent=Ref(minsuccessfulinstancespercent),
                PauseTime=Ref(signaltimeout),
                WaitOnResourceSignals=True
            )
        )
    ))

    # ELB-based health checks only make sense when the ELB exists.
    autoscalinggroup.HealthCheckType = 'EC2'
    if enable_elb:
        autoscalinggroup.LoadBalancerNames = [Ref(loadbalancer)]
        autoscalinggroup.HealthCheckType = 'ELB'

    # --- Scaling policies and the alarms that trigger them ---------------
    created_scaling_policies = dict()
    for scaling_policy in input_scaling_policies:
        policy_properties = {
            'AdjustmentType': scaling_policy['adjustment_type'],
            'AutoScalingGroupName': Ref(autoscalinggroup),
            'Cooldown': scaling_policy['cooldown'],
            'PolicyType': scaling_policy['policy_type'],
            'ScalingAdjustment': scaling_policy['scaling_adjustment'],
        }
        # The optional properties below are only valid for certain policy /
        # adjustment types, so they are added conditionally.
        if scaling_policy['policy_type'] != "SimpleScaling" \
                and 'estimated_instance_warmup' in scaling_policy:
            policy_properties['EstimatedInstanceWarmup'] = \
                scaling_policy['estimated_instance_warmup']

        if scaling_policy['policy_type'] != "SimpleScaling" \
                and 'metric_aggregation_type' in scaling_policy:
            policy_properties['MetricAggregationType'] = scaling_policy['metric_aggregation_type']

        if scaling_policy['adjustment_type'] == "PercentChangeInCapacity" \
                and 'min_adjustment_magnitude' in scaling_policy:
            policy_properties['MinAdjustmentMagnitude'] = scaling_policy['min_adjustment_magnitude']

        if 'step_adjustments' in scaling_policy:
            policy_properties['StepAdjustments'] = scaling_policy['step_adjustments']

        created_scaling_policies[scaling_policy['name']] = template.add_resource(ScalingPolicy(
            scaling_policy['name'],
            **policy_properties
        ))

    for alarm in input_alarms:
        # Each alarm fires the scaling policy named by scaling_policy_name.
        template.add_resource(
            Alarm(
                alarm['name'],
                ActionsEnabled=True,
                AlarmActions=[Ref(created_scaling_policies[alarm['scaling_policy_name']])],
                AlarmDescription=alarm['description'],
                ComparisonOperator=alarm['comparison'],
                Dimensions=[
                    MetricDimension(
                        Name="AutoScalingGroupName",
                        Value=Ref(autoscalinggroup)
                    ),
                ],
                EvaluationPeriods=alarm['evaluation_periods'],
                InsufficientDataActions=[],
                MetricName=alarm['metric'],
                Namespace=alarm['namespace'],
                OKActions=[],
                Period=alarm['period'],
                Statistic=alarm['statistics'],
                Threshold=str(alarm['threshold']),
                Unit=alarm['unit'],
            )
        )

    template.add_output(Output("StackName", Value=Ref(project_name), Description="Stack Name"))
    if enable_elb:
        template.add_output(Output("DomainName", Value=Ref(route53record),
                                   Description="DNS to access the service"))
        template.add_output(Output("LoadBalancer", Value=GetAtt(loadbalancer, "DNSName"),
                                   Description="ELB dns"))
    template.add_output(Output("AutoScalingGroup", Value=Ref(autoscalinggroup),
                               Description="Auto Scaling Group"))
    template.add_output(Output("LaunchConfiguration", Value=Ref(launchconfigurationname),
                               Description="LaunchConfiguration for this deploy"))

    return template
    "KeyName",
    ConstraintDescription="Can contain only ASCII characters.",
    Type="AWS::EC2::KeyPair::KeyName",
    Description="Name of an existing EC2 KeyPair to enable SSH access to the instance",
))

# Whether the worker nodes should attach EBS volumes ("yes"/"no").
UseEBS = t.add_parameter(Parameter(
    "UseEBS",
    Default="no",
    ConstraintDescription="Must be yes or no only.",
    Type="String",
    Description="Use EBS Volumes for the Worker Node",
    AllowedValues=["yes", "no"],
))

# Boolean template condition: true when the operator opted into EBS.
UseEBSBool = t.add_condition("UseEBSBool", Equals(Ref(UseEBS),"yes"))

# CIDR blocks for the VPC and its public subnet.
t.add_mapping("SubnetConfig",
    {'Public': {'CIDR': '10.0.0.0/24'}, 'VPC': {'CIDR': '10.0.0.0/16'}}
)

# Region -> RHEL 6.6 AMI id lookup table.
t.add_mapping("RHEL66",
    {'ap-northeast-1': {'AMI': 'ami-a15666a0'},
     'ap-southeast-1': {'AMI': 'ami-3813326a'},
     'ap-southeast-2': {'AMI': 'ami-55e38e6f'},
     'eu-west-1': {'AMI': 'ami-9cfd53eb'},
     'sa-east-1': {'AMI': 'ami-995ce884'},
     'us-east-1': {'AMI': 'ami-aed06ac6'},
     'us-west-1': {'AMI': 'ami-69ccd92c'},
     'us-west-2': {'AMI': 'ami-5fbcf36f'}}
)
Exemple #20
0
def main():
    """Build and print (as JSON) a CloudFormation template that launches a
    single EC2 instance with SSH access, an IAM instance profile, and a
    wait condition that the instance signals from its user-data script."""

    t = Template()
    t.set_description("test instance launch")
    t.set_version("2010-09-09")

    # User-data script: immediately signal SUCCESS to the wait handle so
    # stack creation can complete once the instance has booted.
    InstUserData = [
        '#!/usr/bin/env bash\n',
        '\n',
        'set -x\n',
        '\n',
        'my_wait_handle="',
        Ref('InstanceWaitHandle'),
        '"\n',
        'curl -X PUT -H \'Content-Type:\' --data-binary \'{ "Status" : "SUCCESS",  "Reason" : "Instance launched",  "UniqueId" : "launch001",  "Data" : "Instance launched."}\'  "${my_wait_handle}"',
        '\n',
        '\n',
    ]

    # --- Parameters ---
    # NOTE(review): "valud" typo below is in the user-visible
    # ConstraintDescription; fixing it would change the rendered template.
    EC2KeyName = t.add_parameter(
        Parameter(
            'EC2KeyName',
            Type="AWS::EC2::KeyPair::KeyName",
            Description=
            "Name of an existing EC2 KeyPair to enable SSH access to the instance.",
            ConstraintDescription="REQUIRED: Must be a valud EC2 key pair",
        ))

    # NOTE(review): "alinux2" is an allowed value, but the AWSRegionAMI
    # mapping below has no "alinux2" entries, so FindInMap would fail at
    # stack-create time for that choice -- confirm and extend the mapping.
    OperatingSystem = t.add_parameter(
        Parameter('OperatingSystem',
                  Type="String",
                  Description="Operating System",
                  Default="centos7",
                  AllowedValues=[
                      "alinux2",
                      "centos7",
                      "rhel7",
                  ],
                  ConstraintDescription="Must be: alinux2, centos7, rhel7"))

    myInstanceType = t.add_parameter(
        Parameter(
            'MyInstanceType',
            Type="String",
            Description="Instance type",
            Default="m5.2xlarge",
        ))

    VpcId = t.add_parameter(
        Parameter(
            'VpcId',
            Type="AWS::EC2::VPC::Id",
            Description="VPC Id for this instance",
        ))

    Subnet = t.add_parameter(
        Parameter('Subnet',
                  Type="AWS::EC2::Subnet::Id",
                  Description="Subnet IDs"))

    ExistingSecurityGroup = t.add_parameter(
        Parameter(
            'ExistingSecurityGroup',
            Type="AWS::EC2::SecurityGroup::Id",
            Description=
            "OPTIONAL: Choose an existing Security Group ID, e.g. sg-abcd1234")
    )

    UsePublicIp = t.add_parameter(
        Parameter(
            'UsePublicIp',
            Type="String",
            Description="Should a public IP address be given to the instance",
            Default="true",
            ConstraintDescription="true/false",
            AllowedValues=["true", "false"]))

    SshAccessCidr = t.add_parameter(
        Parameter(
            'SshAccessCidr',
            Type="String",
            Description="CIDR Block for SSH access, default 127.0.0.1/32",
            Default="127.0.0.1/32",
            AllowedPattern=
            "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})",
            ConstraintDescription="Must be a valid CIDR x.x.x.x/x"))

    # --- IAM: role assumable by EC2 plus its instance profile ---
    RootRole = t.add_resource(
        iam.Role("RootRole",
                 AssumeRolePolicyDocument={
                     "Statement": [{
                         "Effect": "Allow",
                         "Principal": {
                             "Service": ["ec2.amazonaws.com"]
                         },
                         "Action": ["sts:AssumeRole"]
                     }]
                 }))

    # Security group allowing SSH only from the configured CIDR.
    # NOTE(review): "Secuirty" typo is in the deployed GroupDescription.
    SshSecurityGroup = t.add_resource(
        SecurityGroup("SshSecurityGroup",
                      VpcId=Ref(VpcId),
                      GroupDescription="SSH Secuirty group",
                      SecurityGroupIngress=[
                          ec2.SecurityGroupRule(
                              IpProtocol="tcp",
                              FromPort="22",
                              ToPort="22",
                              CidrIp=Ref(SshAccessCidr),
                          ),
                      ]))

    RootInstanceProfile = t.add_resource(
        InstanceProfile("RootInstanceProfile", Roles=[Ref(RootRole)]))

    # NOTE(review): `tags` is built but never attached to any resource in
    # this function -- confirm whether the instance should be tagged.
    tags = Tags(Name=Ref("AWS::StackName"))

    # The instance itself; security groups depend on whether the caller
    # supplied an existing group (see the "not_existing_sg" condition).
    myInstance = t.add_resource(
        ec2.Instance(
            'MyInstance',
            ImageId=FindInMap("AWSRegionAMI", Ref("AWS::Region"),
                              Ref(OperatingSystem)),
            KeyName=Ref(EC2KeyName),
            InstanceType=(Ref(myInstanceType)),
            NetworkInterfaces=[
                NetworkInterfaceProperty(
                    GroupSet=If(
                        "not_existing_sg", [Ref(SshSecurityGroup)],
                        [Ref(SshSecurityGroup),
                         Ref(ExistingSecurityGroup)]),
                    AssociatePublicIpAddress=Ref(UsePublicIp),
                    DeviceIndex='0',
                    DeleteOnTermination='true',
                    SubnetId=Ref(Subnet))
            ],
            IamInstanceProfile=(Ref(RootInstanceProfile)),
            UserData=Base64(Join('', InstUserData)),
        ))

    # Region x OS -> AMI lookup table (no alinux2 entries; see note above).
    t.add_mapping(
        'AWSRegionAMI', {
            "ap-northeast-1": {
                "centos7": "ami-8e8847f1",
                "rhel7": "ami-6b0d5f0d"
            },
            "ap-northeast-2": {
                "centos7": "ami-bf9c36d1",
                "rhel7": "ami-3eee4150"
            },
            "ap-south-1": {
                "centos7": "ami-1780a878",
                "rhel7": "ami-5b673c34"
            },
            "ap-southeast-1": {
                "centos7": "ami-8e0205f2",
                "rhel7": "ami-76144b0a"
            },
            "ap-southeast-2": {
                "centos7": "ami-d8c21dba",
                "rhel7": "ami-67589505"
            },
            "ca-central-1": {
                "centos7": "ami-e802818c",
                "rhel7": "ami-49f0762d"
            },
            "eu-central-1": {
                "centos7": "ami-dd3c0f36",
                "rhel7": "ami-c86c3f23"
            },
            "eu-west-1": {
                "centos7": "ami-3548444c",
                "rhel7": "ami-7c491f05"
            },
            "eu-west-2": {
                "centos7": "ami-00846a67",
                "rhel7": "ami-7c1bfd1b"
            },
            "eu-west-3": {
                "centos7": "ami-262e9f5b",
                "rhel7": "ami-5026902d"
            },
            "sa-east-1": {
                "centos7": "ami-cb5803a7",
                "rhel7": "ami-b0b7e3dc"
            },
            "us-east-1": {
                "centos7": "ami-9887c6e7",
                "rhel7": "ami-6871a115"
            },
            "us-east-2": {
                "centos7": "ami-9c0638f9",
                "rhel7": "ami-03291866"
            },
            "us-west-1": {
                "centos7": "ami-4826c22b",
                "rhel7": "ami-18726478"
            },
            "us-west-2": {
                "centos7": "ami-3ecc8f46",
                "rhel7": "ami-28e07e50"
            }
        })

    # --- Conditions (order relative to resources does not matter in CFN) ---
    # True when no existing security group was passed in.
    t.add_condition("not_existing_sg", Equals(Ref(ExistingSecurityGroup), ""))

    t.add_condition("Has_Public_Ip", Equals(Ref(UsePublicIp), "true"))

    # Wait handle/condition pair; the user-data curl above signals it.
    mywaithandle = t.add_resource(WaitConditionHandle('InstanceWaitHandle'))

    mywaitcondition = t.add_resource(
        WaitCondition("InstanceWaitCondition",
                      Handle=Ref(mywaithandle),
                      Timeout="1500",
                      DependsOn="MyInstance"))

    # --- Outputs ---
    t.add_output([
        Output("InstanceID", Description="Instance ID", Value=Ref(myInstance))
    ])

    t.add_output(
        [Output("InstancePrivateIP", Value=GetAtt('MyInstance', 'PrivateIp'))])

    # Public IP output only exists when a public IP was requested.
    t.add_output([
        Output("InstancePublicIP",
               Value=GetAtt('MyInstance', 'PublicIp'),
               Condition="Has_Public_Ip")
    ])

    ##print(t.to_yaml())
    print(t.to_json(indent=2))
    Description="EC2 instance type",
    Default="m3.medium",
    AllowedValues=[
        "m3.medium", "m3.large", "m3.xlarge", "m3.2xlarge",
        "c1.medium", "c1.xlarge", "cc1.4xlarge", "cc2.8xlarge", "cg1.4xlarge"
    ],
    ConstraintDescription="Must be a valid, EC2 classic "
                          "compatible instance type.",
))

# Conditions
# "EIPProvided" is true unless the EIP parameter was left at its
# 0.0.0.0 placeholder, i.e. the caller supplied a real Elastic IP.
template.add_condition(
    "EIPProvided",
    Not(Equals(Ref("EIP"), "0.0.0.0")),
)

# Define the instance security group
instance_sg = template.add_resource(
    ec2.SecurityGroup(
        "InstanceSecurityGroup",
        GroupDescription="Enable SSH access to the world",
        SecurityGroupIngress=[
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="22",
                ToPort="22",
def main(args):
    """Build a CloudFormation template that provisions up to five EBS
    volumes (new, from snapshot, or reusing existing volume ids) and
    write it as JSON to ``args.target_path``.

    Every *CommaDelimitedList* parameter carries one slot per potential
    volume; the string "NONE" in a slot means "not supplied".
    """
    number_of_vol = 5

    t = Template()
    availability_zone = t.add_parameter(
        Parameter(
            "AvailabilityZone",
            Type="String",
            Description="Availability Zone the cluster will launch into. THIS IS REQUIRED",
        )
    )
    volume_size = t.add_parameter(
        Parameter(
            "VolumeSize", Type="CommaDelimitedList", Description="Size of EBS volume in GB, if creating a new one"
        )
    )
    volume_type = t.add_parameter(
        Parameter(
            "VolumeType", Type="CommaDelimitedList", Description="Type of volume to create either new or from snapshot"
        )
    )
    volume_iops = t.add_parameter(
        Parameter(
            "VolumeIOPS",
            Type="CommaDelimitedList",
            Description="Number of IOPS for volume type io1. Not used for other volume types.",
        )
    )
    ebs_encryption = t.add_parameter(
        Parameter(
            "EBSEncryption",
            Type="CommaDelimitedList",
            Description="Boolean flag to use EBS encryption for /shared volume. " "(Not to be used for snapshots)",
        )
    )
    ebs_kms_id = t.add_parameter(
        Parameter(
            "EBSKMSKeyId",
            Type="CommaDelimitedList",
            Description="KMS ARN for customer created master key, will be used for EBS encryption",
        )
    )
    ebs_volume_id = t.add_parameter(
        Parameter("EBSVolumeId", Type="CommaDelimitedList", Description="Existing EBS volume Id")
    )
    ebs_snapshot_id = t.add_parameter(
        Parameter(
            "EBSSnapshotId",
            Type="CommaDelimitedList",
            Description="Id of EBS snapshot if using snapshot as source for volume",
        )
    )
    ebs_vol_num = t.add_parameter(
        Parameter(
            "NumberOfEBSVol",
            Type="Number",
            Description="Number of EBS Volumes the user requested, up to %s" % number_of_vol,
        )
    )

    # Per-slot condition/resource handles, filled in by the loop below.
    use_vol = [None] * number_of_vol
    use_existing_ebs_volume = [None] * number_of_vol
    v = [None] * number_of_vol

    for i in range(number_of_vol):
        # UseVol{i+1}: volume 1 is always in scope; volume N (N > 1) is in
        # scope only while the requested count has not stopped before it
        # (chained via the previous slot's condition).
        # Vol{i+1}_CreateEBSVolume: create a fresh volume only when the
        # slot is in scope AND no existing volume id was supplied for it.
        if i == 0:
            create_vol = t.add_condition(
                "Vol%s_CreateEBSVolume" % (i + 1), Equals(Select(str(i), Ref(ebs_volume_id)), "NONE")
            )
        elif i == 1:
            use_vol[i] = t.add_condition("UseVol%s" % (i + 1), Not(Equals(Ref(ebs_vol_num), str(i))))
            create_vol = t.add_condition(
                "Vol%s_CreateEBSVolume" % (i + 1),
                And(Condition(use_vol[i]), Equals(Select(str(i), Ref(ebs_volume_id)), "NONE")),
            )
        else:
            use_vol[i] = t.add_condition(
                "UseVol%s" % (i + 1), And(Not(Equals(Ref(ebs_vol_num), str(i))), Condition(use_vol[i - 1]))
            )
            create_vol = t.add_condition(
                "Vol%s_CreateEBSVolume" % (i + 1),
                And(Condition(use_vol[i]), Equals(Select(str(i), Ref(ebs_volume_id)), "NONE")),
            )

        # Per-slot feature toggles, all keyed off the "NONE" sentinel.
        use_ebs_iops = t.add_condition("Vol%s_UseEBSPIOPS" % (i + 1), Equals(Select(str(i), Ref(volume_type)), "io1"))
        use_vol_size = t.add_condition(
            "Vol%s_UseVolumeSize" % (i + 1), Not(Equals(Select(str(i), Ref(volume_size)), "NONE"))
        )
        use_vol_type = t.add_condition(
            "Vol%s_UseVolumeType" % (i + 1), Not(Equals(Select(str(i), Ref(volume_type)), "NONE"))
        )
        use_ebs_encryption = t.add_condition(
            "Vol%s_UseEBSEncryption" % (i + 1), Equals(Select(str(i), Ref(ebs_encryption)), "true")
        )
        use_ebs_kms_key = t.add_condition(
            "Vol%s_UseEBSKMSKey" % (i + 1),
            And(Condition(use_ebs_encryption), Not(Equals(Select(str(i), Ref(ebs_kms_id)), "NONE"))),
        )
        use_ebs_snapshot = t.add_condition(
            "Vol%s_UseEBSSnapshot" % (i + 1), Not(Equals(Select(str(i), Ref(ebs_snapshot_id)), "NONE"))
        )
        use_existing_ebs_volume[i] = t.add_condition(
            "Vol%s_UseExistingEBSVolume" % (i + 1), Not(Equals(Select(str(i), Ref(ebs_volume_id)), "NONE"))
        )
        # Size must be omitted when restoring from a snapshot (the snapshot
        # dictates it); otherwise default to 20 GB when no size was given.
        v[i] = t.add_resource(
            ec2.Volume(
                "Volume%s" % (i + 1),
                AvailabilityZone=Ref(availability_zone),
                VolumeType=If(use_vol_type, Select(str(i), Ref(volume_type)), "gp2"),
                Size=If(use_ebs_snapshot, NoValue, If(use_vol_size, Select(str(i), Ref(volume_size)), "20")),
                SnapshotId=If(use_ebs_snapshot, Select(str(i), Ref(ebs_snapshot_id)), NoValue),
                Iops=If(use_ebs_iops, Select(str(i), Ref(volume_iops)), NoValue),
                Encrypted=If(use_ebs_encryption, Select(str(i), Ref(ebs_encryption)), NoValue),
                KmsKeyId=If(use_ebs_kms_key, Select(str(i), Ref(ebs_kms_id)), NoValue),
                Condition=create_vol,
            )
        )

    # Output is the comma-joined list of ids for all in-scope slots; each
    # slot yields either the supplied existing id or the created volume.
    outputs = [None] * number_of_vol
    vol_to_return = [None] * number_of_vol
    for i in range(number_of_vol):
        vol_to_return[i] = If(use_existing_ebs_volume[i], Select(str(i), Ref(ebs_volume_id)), Ref(v[i]))
        if i == 0:
            outputs[i] = vol_to_return[i]
        else:
            outputs[i] = If(use_vol[i], Join(",", vol_to_return[: (i + 1)]), outputs[i - 1])

    t.add_output(
        Output("Volumeids", Description="Volume IDs of the resulted EBS volumes", Value=outputs[number_of_vol - 1])
    )

    # Render the template to the requested path.
    json_file_path = args.target_path
    output_file = open(json_file_path, "w")
    output_file.write(t.to_json())
    output_file.close()
def main(args):
    """Build a CloudFormation template for an (optional) EFS file system
    plus mount target and write it as JSON to ``args.target_path``.

    ``EFSOptions`` slots use the string "NONE" as the "not supplied"
    sentinel throughout.
    """
    t = Template()

    # [0 shared_dir, 1 efs_fs_id, 2 performance_mode, 3 efs_kms_key_id,
    # 4 provisioned_throughput, 5 encrypted, 6 throughput_mode, 7 exists_valid_mt]
    efs_options = t.add_parameter(
        Parameter(
            "EFSOptions",
            Type="CommaDelimitedList",
            Description="Comma separated list of efs related options, " "8 parameters in total",
        )
    )
    compute_security_group = t.add_parameter(
        Parameter("ComputeSecurityGroup", Type="String", Description="SecurityGroup for Mount Target")
    )
    subnet_id = t.add_parameter(Parameter("SubnetId", Type="String", Description="SubnetId for Mount Target"))
    # Create a new file system only when a shared dir was requested (slot 0)
    # and no existing file-system id was supplied (slot 1).
    create_efs = t.add_condition(
        "CreateEFS",
        And(Not(Equals(Select(str(0), Ref(efs_options)), "NONE")), Equals(Select(str(1), Ref(efs_options)), "NONE")),
    )
    # Create a mount target only when a shared dir was requested and no
    # valid existing mount target was reported (slot 7).
    create_mt = t.add_condition(
        "CreateMT",
        And(Not(Equals(Select(str(0), Ref(efs_options)), "NONE")), Equals(Select(str(7), Ref(efs_options)), "NONE")),
    )
    # Per-slot feature toggles for the optional FileSystem properties.
    use_performance_mode = t.add_condition("UsePerformanceMode", Not(Equals(Select(str(2), Ref(efs_options)), "NONE")))
    use_efs_encryption = t.add_condition("UseEFSEncryption", Equals(Select(str(5), Ref(efs_options)), "true"))
    use_efs_kms_key = t.add_condition(
        "UseEFSKMSKey", And(Condition(use_efs_encryption), Not(Equals(Select(str(3), Ref(efs_options)), "NONE")))
    )
    use_throughput_mode = t.add_condition("UseThroughputMode", Not(Equals(Select(str(6), Ref(efs_options)), "NONE")))
    use_provisioned = t.add_condition("UseProvisioned", Equals(Select(str(6), Ref(efs_options)), "provisioned"))
    # Provisioned throughput only applies in "provisioned" throughput mode.
    use_provisioned_throughput = t.add_condition(
        "UseProvisionedThroughput",
        And(Condition(use_provisioned), Not(Equals(Select(str(4), Ref(efs_options)), "NONE"))),
    )

    fs = t.add_resource(
        FileSystem(
            "EFSFS",
            PerformanceMode=If(use_performance_mode, Select(str(2), Ref(efs_options)), NoValue),
            ProvisionedThroughputInMibps=If(use_provisioned_throughput, Select(str(4), Ref(efs_options)), NoValue),
            ThroughputMode=If(use_throughput_mode, Select(str(6), Ref(efs_options)), NoValue),
            Encrypted=If(use_efs_encryption, Select(str(5), Ref(efs_options)), NoValue),
            KmsKeyId=If(use_efs_kms_key, Select(str(3), Ref(efs_options)), NoValue),
            Condition=create_efs,
        )
    )

    # Mount target attaches to either the new FS or the supplied FS id.
    mt = t.add_resource(
        MountTarget(
            "EFSMT",
            FileSystemId=If(create_efs, Ref(fs), Select(str(1), Ref(efs_options))),
            SecurityGroups=[Ref(compute_security_group)],
            SubnetId=Ref(subnet_id),
            Condition=create_mt,
        )
    )

    t.add_output(
        Output(
            "FileSystemId",
            Description="ID of the FileSystem",
            Value=If(create_efs, Ref(fs), Select("1", Ref(efs_options))),
        )
    )

    # Specify output file path
    json_file_path = args.target_path
    output_file = open(json_file_path, "w")
    output_file.write(t.to_json())
    output_file.close()
    Parameter(
        "LambdaBucket",
        Description=
        "The S3 Bucket that contains the zip to bootstrap your lambda function",
        Type="String",
        Default="galileo-babel-lambda"))

# S3 object key of the deployment package for the lambda function.
t.add_parameter(Parameter(
    "S3Key",
    Type="String",
    Default="GalileoBabel.zip",
    Description="The S3 key that references the zip to bootstrap your lambda function.",
))

# One condition per deployment environment, keyed off LambdaEnv.
for _cond_name, _env in (("IsInt", "int"), ("IsTest", "test"), ("IsLive", "live")):
    t.add_condition(_cond_name, Equals(Ref("LambdaEnv"), _env))

t.add_resource(
    Role(
        "LambdaExecutionRole",
        Policies=[
            iam.Policy(
                PolicyName="FunctionRolePolicy",
                PolicyDocument=Policy(Statement=[
                    Statement(Effect=Allow,
                              Action=[
                                  Action("logs", "CreateLogGroup"),
                                  Action("logs", "CreateLogStream"),
                                  Action("logs", "PutLogEvents"),
# Bucket holding the uploaded lambda source archive.
s3_bucket_parameter = template.add_parameter(Parameter(
    "S3BucketParameter",
    Type="String",
    Description="Name of the S3 bucket where you uploaded the source code zip",
))

# Key of the archive inside that bucket.
source_zip_parameter = template.add_parameter(Parameter(
    "SourceZipParameter",
    Type="String",
    Default="backup-rds.zip",
    Description="Name of the zip file inside the S3 bucket",
))


# An empty database list means "back up every database".
template.add_condition("UseAllDatabases", Equals(Join("", Ref(databases_to_use_parameter)), ""))
# NOTE(review): "UseEncryption" evaluates true when the KMS key parameter is
# EMPTY, which reads inverted relative to its name -- confirm against the
# resources that consume this condition before relying on it.
template.add_condition("UseEncryption", Equals(Ref(kms_key_parameter), ""))
# Aurora clusters are included only on an explicit "Yes".
template.add_condition("IncludeAurora", Equals(Ref(include_aurora_clusters_parameter), "Yes"))

template.add_metadata({
    "AWS::CloudFormation::Interface": {
        "ParameterGroups": [
            {
                "Label": {
                    "default": "Basic configuration"
                },
                "Parameters": [
                    "TargetRegionParameter",
                    "S3BucketParameter",
                    "SourceZipParameter",
                ]
def main(argv):
    """Build and print a CloudFormation template containing one CloudWatch
    alarm per data row of the metric CSV file given via ``-f``/``--FILE``.

    Exits with status 2 on bad usage or a CSV parse error.
    """
    usage = '%s -f <metric-csv-file>' % sys.argv[0]
    FILE = None
    try:
        opts, args = getopt.getopt(argv, "hf:", ["FILE="])
    except getopt.GetoptError:
        # BUG FIX: was a Python 2 print statement (a syntax error on py3).
        print(usage)
        sys.exit(2)

    # BUG FIX: the parsed options were never inspected, so FILE stayed
    # None and open(FILE) raised TypeError.  Bind -f/--FILE, honor -h.
    for opt, arg in opts:
        if opt == '-h':
            print(usage)
            sys.exit(0)
        elif opt in ('-f', '--FILE'):
            FILE = arg
    if FILE is None:
        print(usage)
        sys.exit(2)

    # An array to contain any parameters for the template.
    parameters = []
    # An array to contain any conditions for the template.
    conditions = {}
    # An array to contain any key value maps for the template.
    maps = []
    # An array to contain any resource objects for the template.
    resources = []
    # An array to contain any output objects for the template.
    outputs = []

    # BUG FIX: 'rbU' is an invalid mode on Python 3 and csv needs text
    # mode; per the csv docs open with newline='' instead.
    with open(FILE, 'r', newline='') as f:
        reader = csv.reader(f)
        try:
            # Skip the header row; emit one alarm per data row.
            # NOTE(review): the row contents are not used yet -- every row
            # currently produces the same hard-coded queue-depth alarm
            # (and duplicate "QueueDepthAlarm" titles would collide);
            # myqueue/alarmtopic must be defined at module level.
            for row in islice(reader, 1, None):
                resources.append(Alarm(
                    "QueueDepthAlarm",
                    AlarmDescription="Alarm if queue depth grows beyond 10 messages",
                    Namespace="AWS/SQS",
                    MetricName="ApproximateNumberOfMessagesVisible",
                    Dimensions=[
                        MetricDimension(
                            Name="QueueName",
                            Value=GetAtt(myqueue, "QueueName")
                        ),
                    ],
                    Statistic="Sum",
                    Period="300",
                    EvaluationPeriods="1",
                    Threshold="10",
                    ComparisonOperator="GreaterThanThreshold",
                    AlarmActions=[Ref(alarmtopic), ],
                    InsufficientDataActions=[Ref(alarmtopic), ],
                ))
        except csv.Error as e:
            # BUG FIX: the message referenced the undefined name VPC_ID;
            # report the file actually being read.
            sys.exit('file %s, line %d: %s' % (FILE, reader.line_num, e))

    t = Template()
    t.add_version('2010-09-09')
    t.add_description(
        "This is an AWS CloudFormation template that provisions metric filters "
        "based on a spreadsheet of applicable metric filters. ***WARNING*** This "
        "template creates many Amazon CloudWatch alarms based on a Amazon "
        "CloudWatch Logs Log Group. You will be billed for the AWS resources used "
        "if you create a stack from this template."
    )
    for p in parameters:
        t.add_parameter(p)
    for k in conditions:
        t.add_condition(k, conditions[k])
    for r in resources:
        t.add_resource(r)
    for o in outputs:
        t.add_output(o)

    # Print the template to JSON
    print(t.to_json())
Exemple #27
0
    Parameter(
        'DataIops',
        Description='Number of provisioned data IOPS',
        Type='Number',
        Default='0',
    ),
    Parameter(
        'DataSize',
        Description='Size of data device in GB',
        Type='Number',
        Default='500',
    ),
])

# True when a non-zero number of provisioned data IOPS was requested.
_iops_is_zero = {'Fn::Equals': [Ref('DataIops'), 0]}
template.add_condition('EnableDataIops', {'Fn::Not': [_iops_is_zero]})

# Shared atlas parameter groups: ssh_key, Env, Silo.
atlas.infra_params(template)

# Conf name, conf version, conf tarball bucket.
atlas.conf_params(template)

# Instance role / IAM profile defaults for the log tier.
atlas.instance_params(
    template,
    roles_default=['log', ],
    iam_default='log',
)

atlas.scaling_params(template)

atlas.mappings(
Exemple #28
0
def main(args):
    """Build a CloudFormation template creating up to five EBS volumes
    that back a RAID array, and write it as JSON to ``args.target_path``.

    ``RAIDOptions`` is a CommaDelimitedList whose slots are shared by all
    volumes; "NONE" is the "not supplied" sentinel.
    """
    number_of_vol = 5

    t = Template()
    availability_zone = t.add_parameter(
        Parameter(
            "AvailabilityZone",
            Type="String",
            Description="Availability Zone the cluster will launch into. "
            "THIS IS REQUIRED",
        ))
    raid_options = t.add_parameter(
        Parameter(
            "RAIDOptions",
            Type="CommaDelimitedList",
            Description="Comma separated list of RAID related options, "
            "8 parameters in total, "
            "["
            "0 shared_dir,"
            "1 raid_type,"
            "2 num_of_vols,"
            "3 vol_type,"
            "4 vol_size,"
            "5 vol_IOPS,"
            "6 encrypted, "
            "7 ebs_kms_key]",
        ))
    # Per-slot condition/resource handles, filled in by the loop below.
    use_vol = [None] * number_of_vol
    v = [None] * number_of_vol

    for i in range(number_of_vol):
        # UseVol1: RAID was requested at all (shared_dir != NONE).
        # UseVolN (N > 1): the requested volume count has not stopped
        # before slot N, chained through the previous slot's condition.
        if i == 0:
            use_vol[i] = t.add_condition(
                "UseVol%s" % (i + 1),
                Not(Equals(Select("0", Ref(raid_options)), "NONE")))
        else:
            use_vol[i] = t.add_condition(
                "UseVol%s" % (i + 1),
                And(Not(Equals(Select("2", Ref(raid_options)), str(i))),
                    Condition(use_vol[i - 1])),
            )

        # Feature toggles shared across volumes (all read the same slots).
        use_ebs_iops = t.add_condition(
            "Vol%s_UseEBSPIOPS" % (i + 1),
            Equals(Select("3", Ref(raid_options)), "io1"))
        use_volume_size = t.add_condition(
            "Vol%s_UseVolumeSize" % (i + 1),
            Not(Equals(Select("4", Ref(raid_options)), "NONE")))
        use_volume_type = t.add_condition(
            "Vol%s_UseVolumeType" % (i + 1),
            Not(Equals(Select("3", Ref(raid_options)), "NONE")))
        use_ebs_encryption = t.add_condition(
            "Vol%s_UseEBSEncryption" % (i + 1),
            Equals(Select("6", Ref(raid_options)), "true"))
        use_ebs_kms_key = t.add_condition(
            "Vol%s_UseEBSKMSKey" % (i + 1),
            And(Condition(use_ebs_encryption),
                Not(Equals(Select("7", Ref(raid_options)), "NONE"))),
        )
        # Defaults: gp2 type and 20 GB when not specified.
        v[i] = t.add_resource(
            ec2.Volume(
                "Volume%s" % (i + 1),
                AvailabilityZone=Ref(availability_zone),
                VolumeType=If(use_volume_type, Select("3", Ref(raid_options)),
                              "gp2"),
                Size=If(use_volume_size, Select("4", Ref(raid_options)), 20),
                Iops=If(use_ebs_iops, Select("5", Ref(raid_options)), NoValue),
                Encrypted=If(use_ebs_encryption,
                             Select("6", Ref(raid_options)), NoValue),
                KmsKeyId=If(use_ebs_kms_key, Select("7", Ref(raid_options)),
                            NoValue),
                Condition=use_vol[i],
            ))

    # Output: comma-joined ids of the created volumes for all in-scope
    # slots, or the literal "NONE" when no RAID was requested.
    outputs = [None] * number_of_vol
    vol_to_return = [None] * number_of_vol
    for i in range(number_of_vol):
        vol_to_return[i] = Ref(v[i])
        if i == 0:
            outputs[i] = If(use_vol[i], vol_to_return[i], "NONE")
        else:
            outputs[i] = If(use_vol[i], Join(",", vol_to_return[:(i + 1)]),
                            outputs[i - 1])

    t.add_output(
        Output("Volumeids",
               Description="Volume IDs of the resulted RAID EBS volumes",
               Value=outputs[number_of_vol - 1]))

    # Render the template to the requested path.
    json_file_path = args.target_path
    output_file = open(json_file_path, "w")
    output_file.write(t.to_json())
    output_file.close()
Exemple #29
0
# Subnet that will host the EMR cluster.
subnet = template.add_parameter(Parameter(
    "Subnet",
    Type=SUBNET_ID,
    Description="Subnet ID for creating the EMR cluster",
))

# Bid price for spot instances; the literal "0" selects on-demand.
spot = template.add_parameter(Parameter(
    "SpotPrice",
    Type=NUMBER,
    Default="0.1",
    Description="Spot price (or use 0 for 'on demand' instance)",
))

# Condition toggling spot vs. on-demand provisioning.
withSpotPrice = "WithSpotPrice"
template.add_condition(withSpotPrice, Not(Equals(Ref(spot), "0")))

# Tuning knob for the Hadoop name node garbage collector.
gcTimeRatio = template.add_parameter(Parameter(
    "GcTimeRatioValue",
    Type=NUMBER,
    Default="19",
    Description="Hadoop name node garbage collector time ratio",
))

# IAM roles required by EMR

emr_service_role = template.add_resource(iam.Role(
    'EMRServiceRole',
    AssumeRolePolicyDocument={
        "Statement": [{
            "Effect": "Allow",
Exemple #30
0
        MaxLength="64",
        Type="String",
    ))

# Initial size of the web-server fleet, bounded 1..5.
WebServerCapacity = t.add_parameter(
    Parameter(
        "WebServerCapacity",
        # FIX: corrected typo "nuber" -> "number" in the user-visible text.
        Description="The initial number of WebServer instances",
        Default="2",
        Type="Number",
        MaxValue="5",
        MinValue="1",
        ConstraintDescription="must be between 1 and 5 EC2 instances.",
    ))

# Classic is simply "not a VPC-only region".  Conditions may reference
# conditions defined later; CloudFormation resolves them as a set.
t.add_condition("Is-EC2-Classic", Not(Condition("Is-EC2-VPC")))

# Regions that only support VPC (no EC2-Classic).
t.add_condition(
    "Is-EC2-VPC",
    Or(Equals(Ref("AWS::Region"), "eu-central-1"),
       Equals(Ref("AWS::Region"), "cn-north-1"),
       Equals(Ref("AWS::Region"), "ap-northeast-2")))

t.add_mapping(
    "AWSInstanceType2Arch", {
        u'c1.medium': {
            u'Arch': u'PV64'
        },
        u'c1.xlarge': {
            u'Arch': u'PV64'
        },
              "to the instances",
              Type=KEY_PAIR_NAME))

# Subnet that will host the EMR cluster.
subnet = template.add_parameter(Parameter(
    "Subnet",
    Type=SUBNET_ID,
    Description="Subnet ID for creating the EMR cluster",
))

# Bid price for spot instances; the literal "0" selects on-demand.
spot = template.add_parameter(Parameter(
    "SpotPrice",
    Type=NUMBER,
    Default="0.1",
    Description="Spot price (or use 0 for 'on demand' instance)",
))

# Condition toggling spot vs. on-demand provisioning.
withSpotPrice = "WithSpotPrice"
template.add_condition(withSpotPrice, Not(Equals(Ref(spot), "0")))

# Tuning knob for the Hadoop name node garbage collector.
gcTimeRatio = template.add_parameter(Parameter(
    "GcTimeRatioValue",
    Type=NUMBER,
    Default="19",
    Description="Hadoop name node garbage collector time ratio",
))

# IAM roles required by EMR

emr_service_role = template.add_resource(
    iam.Role(
        'EMRServiceRole',
        AssumeRolePolicyDocument={
            "Statement": [{
                "Effect": "Allow",
Exemple #32
0
    ],
    KeySchema=[
        dynamodb.KeySchema(
            AttributeName="key",
            KeyType="HASH",
        )
    ],
))

# Expose the second table's physical name.
template.add_output(Output("Table2Name", Value=Ref(table2)))


# True when the operator picked table 1 as the write target.
table1_selected = template.add_condition(
    "Table1Selected", Equals(Ref(table), '1')
)

# Write the key/value pair into whichever table was selected.
table_item = template.add_resource(custom_resources.dynamodb.Item(
    "Item",
    TableName=If(table1_selected, Ref(table1), Ref(table2)),
    ItemKey={'key': {'S': Ref(key)}},
    ItemValue={'value': {'S': Ref(value)}},
    Overwrite=Ref(overwrite),
))


def test_item(cloudformation_stack_name):
    cfn_params = {
        "CustomResourcesStack": "vrt-dpc-custom-resources-2-stag",
        "Table": "1",
        "Key": "foo",
def main(args):
    """Build the FSx (Lustre) substack template and write it as JSON to
    ``args.target_path``.

    ``FSXOptions`` slot layout (the string "NONE" means "not supplied"):
      0 shared_dir, 1 fsx_fs_id, 2 storage_capacity, 3 fsx_kms_key_id,
      4 imported_file_chunk_size, 5 export_path, 6 import_path,
      7 weekly_maintenance_start_time
    """
    template = Template()

    # ================= Parameters =================
    fsx_opts = template.add_parameter(
        Parameter(
            "FSXOptions",
            Type="CommaDelimitedList",
            Description="Comma separated list of fsx related options, 8 parameters in total, [shared_dir,fsx_fs_id,storage_capacity,fsx_kms_key_id,imported_file_chunk_size,export_path,import_path,weekly_maintenance_start_time]",
        )
    )
    security_group = template.add_parameter(
        Parameter("ComputeSecurityGroup", Type="String", Description="SecurityGroup for FSx filesystem")
    )
    subnet = template.add_parameter(Parameter("SubnetId", Type="String", Description="SubnetId for FSx filesystem"))

    def _slot(index):
        """Select slot *index* of the FSXOptions list."""
        return Select(str(index), Ref(fsx_opts))

    # ================= Conditions =================
    # Create a new file system only when a shared dir was requested and
    # no existing file-system id was supplied.
    create_fsx = template.add_condition(
        "CreateFSX", And(Not(Equals(_slot(0), "NONE")), Equals(_slot(1), "NONE"))
    )
    use_storage_capacity = template.add_condition("UseStorageCap", Not(Equals(_slot(2), "NONE")))
    use_kms_key = template.add_condition("UseFSXKMSKey", Not(Equals(_slot(3), "NONE")))
    use_chunk_size = template.add_condition("UseImportedFileChunkSize", Not(Equals(_slot(4), "NONE")))
    use_export_path = template.add_condition("UseExportPath", Not(Equals(_slot(5), "NONE")))
    use_import_path = template.add_condition("UseImportPath", Not(Equals(_slot(6), "NONE")))
    use_maintenance_time = template.add_condition(
        "UseWeeklyMaintenanceStartTime", Not(Equals(_slot(7), "NONE"))
    )

    # ================= Resources =================
    lustre_fs = template.add_resource(
        FileSystem(
            "FileSystem",
            FileSystemType="LUSTRE",
            SubnetIds=[Ref(subnet)],
            SecurityGroupIds=[Ref(security_group)],
            KmsKeyId=If(use_kms_key, _slot(3), NoValue),
            StorageCapacity=If(use_storage_capacity, _slot(2), NoValue),
            LustreConfiguration=LustreConfiguration(
                ImportedFileChunkSize=If(use_chunk_size, _slot(4), NoValue),
                ExportPath=If(use_export_path, _slot(5), NoValue),
                ImportPath=If(use_import_path, _slot(6), NoValue),
                WeeklyMaintenanceStartTime=If(use_maintenance_time, _slot(7), NoValue),
            ),
            Condition=create_fsx,
        )
    )

    # ================= Outputs =================
    # Report either the created file system's id or the supplied one.
    template.add_output(
        Output(
            "FileSystemId",
            Description="ID of the FileSystem",
            Value=If(create_fsx, Ref(lustre_fs), Select("1", Ref(fsx_opts))),
        )
    )

    # Render the template to the requested path.
    with open(args.target_path, "w") as output_file:
        output_file.write(template.to_json())
def generate_cloudformation_template():
    """Build the CloudFormation template for the app's Auto Scaling Group.

    Configuration comes from the command line:
      sys.argv[1]  "True"/"False" -- whether to create an ELB + Route53 record
      sys.argv[2]  Python-literal list of scaling-policy dicts
      sys.argv[3]  Python-literal list of CloudWatch alarm dicts
      sys.argv[4]  Python-literal list of ELB listener dicts

    Returns the assembled troposphere Template.
    """
    enable_elb = sys.argv[1]
    input_scaling_policies = ast.literal_eval(sys.argv[2])
    input_alarms = ast.literal_eval(sys.argv[3])

    # argv values arrive as strings; normalise the flag to a real bool.
    enable_elb = enable_elb == 'True'
    elb_listeners = ast.literal_eval(sys.argv[4])

    template = Template()

    template.add_description("""\
    Configures Auto Scaling Group for the app""")

    # ---- Parameters that are always present ----
    project_name = template.add_parameter(
        Parameter(
            "Name",
            Type="String",
            Description="Instances will be tagged with this name",
        ))

    scalecapacity = template.add_parameter(
        Parameter(
            "ScaleCapacity",
            Default="1",
            Type="String",
            Description="Number of api servers to run",
        ))

    minsize = template.add_parameter(
        Parameter(
            "MinScale",
            Type="String",
            Description="Minimum number of servers to keep in the ASG",
        ))

    maxsize = template.add_parameter(
        Parameter(
            "MaxScale",
            Type="String",
            Description="Maximum number of servers to keep in the ASG",
        ))

    signalcount = template.add_parameter(
        Parameter(
            "SignalCount",
            Default="1",
            Type="String",
            Description=
            "No. of signals CF must receive before it sets the status as CREATE_COMPLETE",
        ))

    signaltimeout = template.add_parameter(
        Parameter(
            "SignalTimeout",
            Default="PT5M",
            Type="String",
            Description=
            "Time that CF waits for the number of signals that was specified in Count ",
        ))

    minsuccessfulinstancespercent = template.add_parameter(
        Parameter(
            "MinSuccessfulInstancesPercent",
            Default="100",
            Type="String",
            Description=
            "% instances in a rolling update that must signal success for CF to succeed",
        ))

    environment = template.add_parameter(
        Parameter(
            "Environment",
            Type="String",
            Description="The environment being deployed into",
        ))

    subnet = template.add_parameter(
        Parameter(
            "Subnets",
            Type="CommaDelimitedList",
        ))

    launchconfigurationname = template.add_parameter(
        Parameter(
            "LaunchConfigurationName",
            Type="String",
        ))

    health_check_grace_period = template.add_parameter(
        Parameter(
            "HealthCheckGracePeriod",
            Type="String",
            Default="300",
        ))

    if enable_elb:
        # ---- ELB-only parameters, conditions and resources ----
        elb_subnets = template.add_parameter(
            Parameter(
                "LoadBalancerSubnets",
                Type="CommaDelimitedList",
            ))

        elb_bucket_name = template.add_parameter(
            Parameter("LoadBalancerBucketName",
                      Type="String",
                      Description="S3 Bucket for the ELB access logs"))

        # Access logging is only enabled when a bucket name was supplied.
        template.add_condition("ElbLoggingCondition",
                               Not(Equals(Ref(elb_bucket_name), "")))

        elb_schema = template.add_parameter(
            Parameter(
                "LoadBalancerSchema",
                Type="String",
            ))

        health_check_interval = template.add_parameter(
            Parameter(
                "LoadBalancerHealthCheckInterval",
                Type="String",
            ))

        health_check_timeout = template.add_parameter(
            Parameter(
                "LoadBalancerHealthCheckTimeout",
                Type="String",
            ))

        healthy_threshold = template.add_parameter(
            Parameter(
                "LoadBalancerHealthyThreshold",
                Type="String",
            ))

        unhealthy_threshold = template.add_parameter(
            Parameter(
                "LoadBalancerUnHealthyThreshold",
                Type="String",
            ))

        enable_connection_draining = template.add_parameter(
            Parameter(
                "LoadBalancerEnableConnectionDraining",
                Type="String",
                Default="True",
            ))

        connection_draining_timeout = template.add_parameter(
            Parameter(
                "LoadBalancerConnectionDrainingTimeout",
                Type="String",
                Default="30",
            ))

        loadbalancersecuritygroup = template.add_parameter(
            Parameter(
                "LoadBalancerSecurityGroup",
                Type="CommaDelimitedList",
                Description="Security group for api app load balancer.",
            ))

        hostedzone = template.add_parameter(
            Parameter(
                "HostedZoneName",
                Description=
                "The DNS name of an existing Amazon Route 53 hosted zone",
                Type="String",
            ))

        dns_record = template.add_parameter(
            Parameter(
                "DNSRecord",
                Type="String",
            ))

        dns_ttl = template.add_parameter(
            Parameter(
                "DNSTTL",
                Default="300",
                Type="String",
            ))

        new_weight = template.add_parameter(
            Parameter(
                "NewDnsWeight",
                Type="String",
                Default="100",
            ))

        health_check_protocol = template.add_parameter(
            Parameter(
                "LoadBalancerHealthCheckProtocol",
                Type="String",
            ))

        # TCP health checks have no URL path; used below to drop the path
        # from the HealthCheck Target string.
        template.add_condition("ElbTCPProtocolCondition",
                               Equals(Ref(health_check_protocol), "TCP"))

        health_check_port = template.add_parameter(
            Parameter(
                "LoadBalancerHealthCheckPort",
                Type="String",
            ))

        health_check_path = template.add_parameter(
            Parameter(
                "LoadBalancerHealthCheckPath",
                Type="String",
            ))

        # One elb.Listener per entry of the listener config passed in argv[4].
        load_balancer_listeners = []
        for listener in elb_listeners:
            load_balancer_listeners.append(
                elb.Listener(
                    LoadBalancerPort=listener['load_balancer_port'],
                    InstancePort=listener['instance_port'],
                    Protocol=listener['protocol'],
                    InstanceProtocol=Ref(health_check_protocol),
                ))

        # Classic load balancer fronting the ASG instances.
        loadbalancer = template.add_resource(
            elb.LoadBalancer(
                "LoadBalancer",
                AccessLoggingPolicy=If(
                    "ElbLoggingCondition",
                    elb.AccessLoggingPolicy(EmitInterval=60,
                                            Enabled=True,
                                            S3BucketName=Ref(elb_bucket_name),
                                            S3BucketPrefix="ELBLogs"),
                    Ref("AWS::NoValue")),
                ConnectionDrainingPolicy=elb.ConnectionDrainingPolicy(
                    Enabled=Ref(enable_connection_draining),
                    Timeout=Ref(connection_draining_timeout),
                ),
                Subnets=Ref(elb_subnets),
                HealthCheck=elb.HealthCheck(
                    # Target is "PROTO:port" for TCP checks, and
                    # "PROTO:port/path" for HTTP(S) checks.
                    Target=Join("", [
                        Ref(health_check_protocol), ":",
                        Ref(health_check_port),
                        If("ElbTCPProtocolCondition", Ref("AWS::NoValue"),
                           Ref(health_check_path))
                    ]),
                    HealthyThreshold=Ref(healthy_threshold),
                    UnhealthyThreshold=Ref(unhealthy_threshold),
                    Interval=Ref(health_check_interval),
                    Timeout=Ref(health_check_timeout),
                ),
                Listeners=load_balancer_listeners,
                CrossZone=True,
                SecurityGroups=Ref(loadbalancersecuritygroup),
                Scheme=Ref(elb_schema)))

        # Weighted CNAME pointing at the ELB; SetIdentifier/Weight allow
        # weighted routing between stacks sharing the same record name.
        route53record = template.add_resource(
            RecordSetType(
                "DNS",
                HostedZoneName=Join("", [Ref(hostedzone), "."]),
                Name=Join("", [Ref(dns_record), ".",
                               Ref(hostedzone), "."]),
                ResourceRecords=[GetAtt(loadbalancer, "DNSName")],
                SetIdentifier=Ref(project_name),
                TTL=Ref(dns_ttl),
                Type="CNAME",
                Weight=Ref(new_weight),
            ))

    # Auto Scaling Group.  CreationPolicy makes CF wait for cfn-signal(s)
    # from the instances; UpdatePolicy rolls instances one at a time.
    autoscalinggroup = template.add_resource(
        AutoScalingGroup(
            "AutoscalingGroup",
            Tags=[
                Tag("Name", Ref(project_name), True),
                Tag("Environment", Ref(environment), True)
            ],
            LaunchConfigurationName=Ref(launchconfigurationname),
            MinSize=Ref(minsize),
            MaxSize=Ref(maxsize),
            DesiredCapacity=Ref(scalecapacity),
            VPCZoneIdentifier=Ref(subnet),
            HealthCheckGracePeriod=Ref(health_check_grace_period),
            CreationPolicy=CreationPolicy(
                ResourceSignal=ResourceSignal(Count=Ref(signalcount),
                                              Timeout=Ref(signaltimeout)),
                AutoScalingCreationPolicy=AutoScalingCreationPolicy(
                    MinSuccessfulInstancesPercent=Ref(
                        minsuccessfulinstancespercent))),
            UpdatePolicy=UpdatePolicy(
                AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                    MaxBatchSize='1',
                    MinInstancesInService='1',
                    MinSuccessfulInstancesPercent=Ref(
                        minsuccessfulinstancespercent),
                    PauseTime=Ref(signaltimeout),
                    WaitOnResourceSignals=True))))

    # Plain EC2 health checks by default; upgraded to ELB-based health
    # checks when a load balancer is attached.
    autoscalinggroup.HealthCheckType = 'EC2'
    if enable_elb:
        autoscalinggroup.LoadBalancerNames = [Ref(loadbalancer)]
        autoscalinggroup.HealthCheckType = 'ELB'

    # One ScalingPolicy resource per input dict; optional properties are
    # only set when valid for the chosen policy/adjustment type.
    created_scaling_policies = dict()
    for scaling_policy in input_scaling_policies:
        policy_properties = {
            'AdjustmentType': scaling_policy['adjustment_type'],
            'AutoScalingGroupName': Ref(autoscalinggroup),
            'Cooldown': scaling_policy['cooldown'],
            'PolicyType': scaling_policy['policy_type'],
            'ScalingAdjustment': scaling_policy['scaling_adjustment'],
        }
        # EstimatedInstanceWarmup is not valid for SimpleScaling policies.
        if scaling_policy['policy_type'] != "SimpleScaling" \
                and 'estimated_instance_warmup' in scaling_policy:
            policy_properties['EstimatedInstanceWarmup'] = \
                scaling_policy['estimated_instance_warmup']

        # MetricAggregationType is likewise StepScaling-only.
        if scaling_policy['policy_type'] != "SimpleScaling" \
                and 'metric_aggregation_type' in scaling_policy:
            policy_properties['MetricAggregationType'] = scaling_policy[
                'metric_aggregation_type']

        # MinAdjustmentMagnitude only applies to percentage adjustments.
        if scaling_policy['adjustment_type'] == "PercentChangeInCapacity" \
                and 'min_adjustment_magnitude' in scaling_policy:
            policy_properties['MinAdjustmentMagnitude'] = scaling_policy[
                'min_adjustment_magnitude']

        if 'step_adjustments' in scaling_policy:
            policy_properties['StepAdjustments'] = scaling_policy[
                'step_adjustments']

        created_scaling_policies[
            scaling_policy['name']] = template.add_resource(
                ScalingPolicy(scaling_policy['name'], **policy_properties))

    # CloudWatch alarms wired to the scaling policies created above; each
    # alarm references its policy by the 'scaling_policy_name' key.
    for alarm in input_alarms:
        template.add_resource(
            Alarm(
                alarm['name'],
                ActionsEnabled=True,
                AlarmActions=[
                    Ref(created_scaling_policies[alarm['scaling_policy_name']])
                ],
                AlarmDescription=alarm['description'],
                ComparisonOperator=alarm['comparison'],
                Dimensions=[
                    MetricDimension(Name="AutoScalingGroupName",
                                    Value=Ref(autoscalinggroup)),
                ],
                EvaluationPeriods=alarm['evaluation_periods'],
                InsufficientDataActions=[],
                MetricName=alarm['metric'],
                Namespace=alarm['namespace'],
                OKActions=[],
                Period=alarm['period'],
                Statistic=alarm['statistics'],
                Threshold=str(alarm['threshold']),
                Unit=alarm['unit'],
            ))

    # ---- Stack outputs ----
    template.add_output(
        Output("StackName", Value=Ref(project_name), Description="Stack Name"))
    if enable_elb:
        template.add_output(
            Output("DomainName",
                   Value=Ref(route53record),
                   Description="DNS to access the service"))
        template.add_output(
            Output("LoadBalancer",
                   Value=GetAtt(loadbalancer, "DNSName"),
                   Description="ELB dns"))
    template.add_output(
        Output("AutoScalingGroup",
               Value=Ref(autoscalinggroup),
               Description="Auto Scaling Group"))
    template.add_output(
        Output("LaunchConfiguration",
               Value=Ref(launchconfigurationname),
               Description="LaunchConfiguration for this deploy"))

    return template
Exemple #35
0
    AllowedValues=['VPC', 'Subnet', 'NetworkInterface'],
))

# Which traffic the flow log should capture (accepted, rejected, or both).
param_traffic_type = t.add_parameter(Parameter(
    'TrafficType',
    Description='The type of traffic to log.',
    Type='String',
    Default='ALL',
    AllowedValues=['ACCEPT', 'REJECT', 'ALL'],
))

#
# Don't-expire condition: a retention value of -1 means "keep forever",
# in which case RetentionInDays must be omitted from the log group.
#
t.add_condition(
    'NotExpireCondition',
    Equals(Ref(param_retention), -1)
)

#
# Resources
#

# Log group receiving the flow logs; RetentionInDays collapses to
# AWS::NoValue when the logs should never expire.
log_group = t.add_resource(logs.LogGroup(
    'LogGroup',
    RetentionInDays=If('NotExpireCondition',
                       Ref(AWS_NO_VALUE),
                       Ref(param_retention))
))

log_delivery_role = t.add_resource(iam.Role(
    'LogDeliveryRole',
def main(args):
    """Generate the FSx Lustre substack template and write it to disk.

    :param args: parsed command line arguments; only ``args.target_path``
        (destination file for the generated JSON template) is used.
    """
    t = Template()

    # ================= Parameters =================
    # FSXOptions is positional; the literal string "NONE" marks an unset slot:
    #      0            1           2              3                    4                  5           6
    # [shared_dir,fsx_fs_id,storage_capacity,fsx_kms_key_id,imported_file_chunk_size,export_path,import_path,
    #              7
    # weekly_maintenance_start_time]
    fsx_options = t.add_parameter(
        Parameter(
            "FSXOptions",
            Type="CommaDelimitedList",
            Description=
            "Comma separated list of fsx related options, 8 parameters in total, [shared_dir,fsx_fs_id,"
            "storage_capacity,fsx_kms_key_id,imported_file_chunk_size,export_path,import_path,"
            "weekly_maintenance_start_time]",
        ))

    compute_security_group = t.add_parameter(
        Parameter("ComputeSecurityGroup",
                  Type="String",
                  Description="SecurityGroup for FSx filesystem"))

    subnet_id = t.add_parameter(
        Parameter("SubnetId",
                  Type="String",
                  Description="SubnetId for FSx filesystem"))

    # ================= Conditions =================
    # Create a filesystem only when a shared_dir was given (slot 0) and no
    # pre-existing filesystem id was supplied (slot 1).
    create_fsx = t.add_condition(
        "CreateFSX",
        And(Not(Equals(Select(str(0), Ref(fsx_options)), "NONE")),
            Equals(Select(str(1), Ref(fsx_options)), "NONE")),
    )

    # Each optional FSx property is only emitted when its slot is not "NONE".
    use_storage_capacity = t.add_condition(
        "UseStorageCap", Not(Equals(Select(str(2), Ref(fsx_options)), "NONE")))
    use_fsx_kms_key = t.add_condition(
        "UseFSXKMSKey", Not(Equals(Select(str(3), Ref(fsx_options)), "NONE")))
    use_imported_file_chunk_size = t.add_condition(
        "UseImportedFileChunkSize",
        Not(Equals(Select(str(4), Ref(fsx_options)), "NONE")))
    use_export_path = t.add_condition(
        "UseExportPath", Not(Equals(Select(str(5), Ref(fsx_options)), "NONE")))
    use_import_path = t.add_condition(
        "UseImportPath", Not(Equals(Select(str(6), Ref(fsx_options)), "NONE")))
    use_weekly_mainenance_start_time = t.add_condition(
        "UseWeeklyMaintenanceStartTime",
        Not(Equals(Select(str(7), Ref(fsx_options)), "NONE")))

    # ================= Resources =================
    # The Lustre filesystem itself; optional properties fall back to
    # AWS::NoValue when their condition is false.
    fs = t.add_resource(
        FileSystem(
            "FileSystem",
            FileSystemType="LUSTRE",
            SubnetIds=[Ref(subnet_id)],
            SecurityGroupIds=[Ref(compute_security_group)],
            KmsKeyId=If(use_fsx_kms_key, Select(str(3), Ref(fsx_options)),
                        NoValue),
            StorageCapacity=If(use_storage_capacity,
                               Select(str(2), Ref(fsx_options)), NoValue),
            LustreConfiguration=LustreConfiguration(
                ImportedFileChunkSize=If(use_imported_file_chunk_size,
                                         Select(str(4), Ref(fsx_options)),
                                         NoValue),
                ExportPath=If(use_export_path,
                              Select(str(5), Ref(fsx_options)), NoValue),
                ImportPath=If(use_import_path,
                              Select(str(6), Ref(fsx_options)), NoValue),
                WeeklyMaintenanceStartTime=If(use_weekly_mainenance_start_time,
                                              Select(str(7), Ref(fsx_options)),
                                              NoValue),
            ),
            Condition=create_fsx,
        ))

    # ================= Outputs =================
    # When an existing filesystem id was supplied (CreateFSX false), expose
    # that id instead of the newly created resource's.
    t.add_output(
        Output(
            "FileSystemId",
            Description="ID of the FileSystem",
            Value=If(create_fsx, Ref(fs), Select("1", Ref(fsx_options))),
        ))

    # Write the rendered template; the context manager guarantees the file
    # handle is closed even if serialization fails.  (Fixes the original's
    # manual open()/close() pair, which leaked the handle on error.)
    with open(args.target_path, "w") as output_file:
        output_file.write(t.to_json())
Exemple #37
0
class Stack(cfn.Stack):
    """A CloudFormation stack whose template is assembled from troposphere
    AWSObjects and higher-level Constructs."""

    def __init__(
        self,
        stack_name: str,
        description: Optional[str] = None,
        cfn_role_arn: Optional[str] = None,
        deploy_session: Optional[Session] = None,
        dry_run: Optional[bool] = False,
        s3_bucket: Optional[str] = None,
        s3_key: Optional[str] = None,
    ) -> None:
        """Initialize the stack.

        :param stack_name: stack name
        :param cfn_role_arn: role assumed by CloudFormation to create the stack
        :param deploy_session: AWS session used to deploy non-CloudFormation
            AWS resources (aka Assets)
        :param dry_run: True if the stack is not to be deployed
        :param description: a description of the stack
        :param s3_bucket: s3 bucket used to store data needed by the stack
        :param s3_key: s3 prefix in s3_bucket in which data is stored
        """
        super().__init__(
            stack_name,
            cfn_role_arn=cfn_role_arn,
            description=description,
            s3_bucket=s3_bucket,
            s3_key=s3_key,
        )
        self.deploy_session = deploy_session
        self.dry_run = dry_run
        self.template = Template()
        # Constructs are kept unexpanded so policies/data dirs can be
        # derived from them later.
        self.constructs: list[Construct | AWSObject] = []

    def add(self, element: Union[AWSObject, Construct, Stack]) -> Stack:
        """Merge a resource, construct, or a whole stack into this stack.

        :param element: an AWSObject or Construct to register, or another
            Stack whose constructs are all absorbed into the current one.
        """
        incoming = list(element.constructs) if isinstance(element, Stack) \
            else [element]

        # Remember the new constructs in their unexpanded form.
        self.constructs += incoming

        # Expand each construct into concrete troposphere resources and
        # register them on the template.
        expanded = []
        for item in incoming:
            if isinstance(item, Construct):
                expanded += item.resources(stack=self)
            if isinstance(item, AWSObject):
                expanded.append(item)
        self.template.add_resource(expanded)

        return self

    def add_condition(self, condition_name: str,
                      condition: ConditionFunction) -> None:
        """Register a named condition on the underlying template.

        :param condition_name: name of the condition to add
        :param condition: condition to add
        """
        self.template.add_condition(condition_name, condition)

    def cfn_policy_document(self) -> PolicyDocument:
        """Aggregate the CloudFormation policy documents of all constructs."""
        document = PolicyDocument([])
        for item in self.constructs:
            if isinstance(item, Construct):
                document += item.cfn_policy_document(stack=self)
        return document

    def __getitem__(self, resource_name: str) -> AWSObject:
        """Return the AWSObject registered under resource_name.

        :param resource_name: name of the resource to retrieve
        """
        return self.template.resources[name_to_id(resource_name)]

    def export(self) -> dict:
        """Serialize the stack as a template dict.

        :return: a dict that can be serialized as YAML to produce a template
        """
        body = self.template.to_dict()
        if self.description is not None:
            body["Description"] = self.description
        return body

    def create_data_dir(self, root_dir: str) -> None:
        """Ask every construct to write the data it needs under root_dir.

        :param root_dir: the local directory in which to store the data
        """
        for item in self.constructs:
            if isinstance(item, Construct):
                item.create_data_dir(root_dir)
Exemple #38
0
              Type="Number",
              Default="5",
              MinValue="5",
              MaxValue="10000",
              ConstraintDescription="should be between 5 and 10000"))

# Provisioned write throughput for the table.  NOTE(review): presumably
# ignored when the OnDemand condition below is true -- confirm against the
# table resource defined elsewhere in this file.
writeunits = template.add_parameter(
    Parameter("WriteCapacityUnits",
              Description="Provisioned write throughput",
              Type="Number",
              Default="10",
              MinValue="5",
              MaxValue="10000",
              ConstraintDescription="should be between 5 and 10000"))

# True when the operator selected on-demand (pay-per-request) billing.
template.add_condition("OnDemand", Equals(Ref(on_demand), "true"))

# Name of the table's hash (partition) key attribute.
hashkeyname = template.add_parameter(
    Parameter(
        "HashKeyElementName",
        Description="HashType PrimaryKey Name",
        Type="String",
        AllowedPattern="[a-zA-Z0-9]*",
        MinLength="1",
        MaxLength="2048",
        ConstraintDescription="must contain only alphanumberic characters"))

hashkeytype = template.add_parameter(
    Parameter("HashKeyElementType",
              Description="HashType PrimaryKey Type",
              Type="String",
Exemple #39
0
def ssm_network():
    """Build a VPC template prepared for AWS Systems Manager usage.

    Creates a VPC with one public and one private subnet, optionally a NAT
    gateway and the VPC endpoints (s3, ssm, ssmmessages, ec2messages) needed
    for Session Manager without internet access.  Writes the template next
    to this file as ssm_network.yml and returns it as a YAML string.
    """
    template = Template()

    default_route = "0.0.0.0/0"
    vpc_cidr = "192.168.0.0/16"

    template.add_parameter(Parameter(
        "VpcCidr",
        Type="String",
        Description="Cidr block for VPC",
        MinLength="9",
        MaxLength="18",
        Default=vpc_cidr,
        AllowedPattern="(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})",
        ConstraintDescription="Must match following pattern 'xxx.xxx.xxx.xxx/xx'"
    ))

    template.add_parameter(Parameter(
        "CreateEndpoints",
        Type="String",
        Description="Create VPC Endpoints",
        Default="No",
        AllowedValues=["Yes", "No"],
        ConstraintDescription="'Yes' or 'No' are only options"
    ))

    template.add_parameter(Parameter(
        "CreateNatGateway",
        Type="String",
        Description="Create NAT Gateway",
        Default="No",
        AllowedValues=["Yes", "No"],
        ConstraintDescription="'Yes' or 'No' are only options"
    ))

    # Each feature flag accepts "Yes" or "yes"; the Or() conditions below
    # fold the two spellings into one usable condition per feature.
    conditions = {
        "CreateVpcEndpointsUpperYes": Equals(
            Ref("CreateEndpoints"), "Yes"
        ),
        "CreateVpcEndpointsLowerYes": Equals(
            Ref("CreateEndpoints"), "yes"
        ),
        "CreateVpcEndpoints": Or(
            Condition("CreateVpcEndpointsUpperYes"),
            Condition("CreateVpcEndpointsLowerYes")
        ),
        "CreateNatGatewayUpperYes": Equals(
            Ref("CreateNatGateway"), "Yes"
        ),
        "CreateNatGatewayLowerYes": Equals(
            Ref("CreateNatGateway"), "yes"
        ),
        "CreateNatGateway": Or(
            Condition("CreateNatGatewayUpperYes"),
            Condition("CreateNatGatewayLowerYes")
        )
    }

    # DNS support/hostnames are required for interface endpoints with
    # PrivateDnsEnabled (used below).
    ssm_vpc = ec2.VPC(
        'SsmVpc',
        CidrBlock=Ref("VpcCidr"),
        InstanceTenancy="default",
        EnableDnsHostnames=True,
        EnableDnsSupport=True,
        Tags=Tags(
            Name="SSM VPC"
        )
    )

    # Carve the VPC CIDR into 256 blocks of 8 host bits each (with the
    # default /16 that yields /24 subnets); block 0 is the public subnet,
    # block 1 the private one.
    subnet_blocks = Cidr(GetAtt(ssm_vpc, "CidrBlock"), 256, 8)

    ssm_ig = ec2.InternetGateway(
        'SsmIG',
    )

    ssm_attach_gw = ec2.VPCGatewayAttachment(
        'SsmAttachGateway',
        InternetGatewayId=Ref(ssm_ig),
        VpcId=Ref(ssm_vpc)
    )

    ssm_public_subnet = ec2.Subnet(
        'SsmPublicSubnet',
        DependsOn=ssm_attach_gw,
        AvailabilityZone=Select(0, GetAZs('')),
        CidrBlock=Select(0, subnet_blocks),
        VpcId=Ref(ssm_vpc),
        Tags=Tags(
            Name="Public Subnet"
        )
    )

    ssm_public_route_table = ec2.RouteTable(
        'SsmPublicRouteTable',
        VpcId=Ref(ssm_vpc),
    )

    # Default route of the public subnet goes straight to the IGW.
    ssm_public_route = ec2.Route(
        'SsmPublicRoute',
        DestinationCidrBlock=default_route,
        GatewayId=Ref(ssm_ig),
        RouteTableId=Ref(ssm_public_route_table)
    )

    ssm_public_subnet_route_table_association = ec2.SubnetRouteTableAssociation(
        'SsmPublicSubnetRouteTableAssociation',
        RouteTableId=Ref(ssm_public_route_table),
        SubnetId=Ref(ssm_public_subnet)
    )

    # NAT gateway (plus its EIP) only exists when CreateNatGateway is set.
    ssm_eip_nat_gateway = ec2.EIP(
        'SsmEipNatGateway',
        Condition="CreateNatGateway"
    )

    ssm_nat_gateway = ec2.NatGateway(
        'SsmNatGateway',
        Condition="CreateNatGateway",
        DependsOn=ssm_eip_nat_gateway,
        SubnetId=Ref(ssm_public_subnet),
        AllocationId=GetAtt(ssm_eip_nat_gateway, "AllocationId"),
    )

    ssm_private_subnet = ec2.Subnet(
        'SsmPrivateSubnet',
        DependsOn=ssm_attach_gw,
        AvailabilityZone=Select(0, GetAZs('')),
        CidrBlock=Select(1, subnet_blocks),
        VpcId=Ref(ssm_vpc),
        Tags=Tags(
            Name="Private Subnet"
        )
    )

    ssm_private_route_table = ec2.RouteTable(
        'SsmPrivateRouteTable',
        VpcId=Ref(ssm_vpc),
    )

    # Private subnet egress via the NAT gateway; only created along with it.
    ssm_private_route = ec2.Route(
        'SsmPrivateRoute',
        Condition="CreateNatGateway",
        DestinationCidrBlock=default_route,
        NatGatewayId=Ref(ssm_nat_gateway),
        RouteTableId=Ref(ssm_private_route_table)
    )

    ssm_private_subnet_route_table_association = ec2.SubnetRouteTableAssociation(
        'SsmPrivateSubnetRouteTableAssociation',
        RouteTableId=Ref(ssm_private_route_table),
        SubnetId=Ref(ssm_private_subnet)
    )

    # Allow HTTPS from anywhere inside the VPC -- the interface endpoints
    # below receive their traffic on 443.
    ssm_sg_ingress_rules = [
        ec2.SecurityGroupRule(
            ToPort=443,
            FromPort=443,
            IpProtocol="tcp",
            CidrIp=GetAtt(ssm_vpc, "CidrBlock")
        )
    ]

    ssm_security_group = ec2.SecurityGroup(
        'SsmSecurityGroup',
        GroupName="SsmSG",
        GroupDescription="SG for SSM usage",
        VpcId=Ref(ssm_vpc),
        SecurityGroupIngress=ssm_sg_ingress_rules
    )

    # NOTE(review): vpc_endpoint() is defined elsewhere in this file and
    # presumably expands a service short-name into the regional
    # com.amazonaws.<region>.<service> endpoint name -- confirm.
    ssm_s3e_vpc_endpoint = ec2.VPCEndpoint(
        'SsmS3VpcEndpoint',
        Condition="CreateVpcEndpoints",
        RouteTableIds=[
            Ref(ssm_private_route_table)
        ],
        ServiceName=vpc_endpoint("s3"),
        VpcId=Ref(ssm_vpc),
        VpcEndpointType="Gateway"
    )

    ssm_ssm_vpc_endpoint = ec2.VPCEndpoint(
        'SsmSsmVpcEndpoint',
        Condition="CreateVpcEndpoints",
        SubnetIds=[Ref(ssm_private_subnet)],
        ServiceName=vpc_endpoint("ssm"),
        VpcId=Ref(ssm_vpc),
        VpcEndpointType="Interface",
        SecurityGroupIds=[
            Ref(ssm_security_group)
        ],
        PrivateDnsEnabled=True
    )

    ssm_ssmmessages_vpc_endpoint = ec2.VPCEndpoint(
        'SsmSsmMessagesVpcEndpoint',
        Condition="CreateVpcEndpoints",
        SubnetIds=[Ref(ssm_private_subnet)],
        ServiceName=vpc_endpoint("ssmmessages"),
        VpcId=Ref(ssm_vpc),
        VpcEndpointType="Interface",
        SecurityGroupIds=[
            Ref(ssm_security_group)
        ],
        PrivateDnsEnabled=True
    )

    ssm_ec2messages_vpc_endpoint = ec2.VPCEndpoint(
        'SsmEc2MessagesVpcEndpoint',
        Condition="CreateVpcEndpoints",
        SubnetIds=[Ref(ssm_private_subnet)],
        ServiceName=vpc_endpoint("ec2messages"),
        VpcId=Ref(ssm_vpc),
        VpcEndpointType="Interface",
        SecurityGroupIds=[
            Ref(ssm_security_group)
        ],
        PrivateDnsEnabled=True
    )

    # Register everything on the template.
    template.add_resource(ssm_vpc)
    template.add_resource(ssm_ig)
    template.add_resource(ssm_attach_gw)
    template.add_resource(ssm_eip_nat_gateway)
    template.add_resource(ssm_public_subnet)
    template.add_resource(ssm_public_route_table)
    template.add_resource(ssm_nat_gateway)
    template.add_resource(ssm_public_route)
    template.add_resource(ssm_public_subnet_route_table_association)
    template.add_resource(ssm_private_subnet)
    template.add_resource(ssm_private_route_table)
    template.add_resource(ssm_private_route)
    template.add_resource(ssm_private_subnet_route_table_association)
    template.add_resource(ssm_security_group)
    template.add_resource(ssm_s3e_vpc_endpoint)
    template.add_resource(ssm_ec2messages_vpc_endpoint)
    template.add_resource(ssm_ssm_vpc_endpoint)
    template.add_resource(ssm_ssmmessages_vpc_endpoint)

    # Conditions may be added after the resources that reference them;
    # ordering inside the template does not matter to CloudFormation.
    for k in conditions:
        template.add_condition(k, conditions[k])

    # Exported outputs allow other stacks to import these ids by name.
    template.add_output(Output(
        'SsmVpc',
        Description="VPC for SSM",
        Value=Ref(ssm_vpc),
        Export=Export(Join("", [Ref("AWS::StackName"), "-ssm-vpc"]))
    ))

    template.add_output(Output(
        'SsmSg',
        Description="Security Group for SSM",
        Value=Ref(ssm_security_group),
        Export=Export(Join("", [Ref("AWS::StackName"), "-ssm-sg"]))
    ))

    template.add_output(Output(
        'SsmPrivateSubnet',
        Description="Private Subnet for SSM",
        Value=Ref(ssm_private_subnet),
        Export=Export(Join("", [Ref("AWS::StackName"), "-ssm-private-subnet"]))
    ))

    template.add_output(Output(
        'SsmPrivateRouteTable',
        Description="Private RouteTable for SSM",
        Value=Ref(ssm_private_route_table),
        Export=Export(Join("", [Ref("AWS::StackName"), "-ssm-private-route-table"]))
    ))

    # Persist the rendered template alongside this module.
    with open(os.path.dirname(os.path.realpath(__file__)) + '/ssm_network.yml', 'w') as cf_file:
        cf_file.write(template.to_yaml())

    return template.to_yaml()
    "MasterUserPassword",
    Description="The password associated with the master user account for the "
    "redshift cluster that is being created.",
    Type="String",
    NoEcho=True,
))

conditions = {
    # Redshift's ClusterType is either "single-node" or "multi-node", and
    # NumberOfNodes may only be supplied for multi-node clusters.
    # Fixed: the original compared against "multi-mode", a typo that could
    # never match, so NumberOfNodes was always dropped and multi-node
    # deployments failed validation.
    "IsMultiNodeCluster": Equals(
        Ref("ClusterType"),
        "multi-node"
    ),
}

for k in conditions:
    t.add_condition(k, conditions[k])

# The Redshift cluster wired to the parameters defined above.  NumberOfNodes
# collapses to AWS::NoValue for single-node clusters, as CloudFormation
# requires.
redshiftcluster = t.add_resource(Cluster(
    "RedshiftCluster",
    ClusterType=Ref("ClusterType"),
    NumberOfNodes=If("IsMultiNodeCluster",
                     Ref("NumberOfNodes"), Ref("AWS::NoValue")),
    NodeType=Ref("NodeType"),
    DBName=Ref("DatabaseName"),
    MasterUsername=Ref("MasterUsername"),
    MasterUserPassword=Ref("MasterUserPassword"),
    ClusterParameterGroupName=Ref("RedshiftClusterParameterGroup"),
    VpcSecurityGroupIds=Ref("SecurityGroup"),
    ClusterSubnetGroupName=Ref("RedshiftClusterSubnetGroup"),
))
Exemple #41
0
class Stack(object):
    """Account-environment CloudFormation stack.

    Builds the complete template (VPC, shared services and network
    elements) at construction time; the result is available as
    ``self.template``.
    """

    def __init__(self):
        self.template = Template()
        t = self.template
        t.add_version("2010-09-09")
        t.add_description(
            "Create resources for the AWS account environment. "
            "Includes VPC, shared services and network elements")

        params = Parameters()
        vpc = Vpc(parameters=params)
        outputs = Outputs(vpc=vpc, parameters=params)

        for param in params.values():
            t.add_parameter(param)

        # NAT gateways are opt-in because they cost money.
        t.add_condition(
            "DeployNATGateways",
            Equals(Ref(params.DeployNATGateways), "true"))

        # A production route53 zone is only created for "prod".
        t.add_condition(
            "CreateProductionZone",
            Equals(Ref(params.Environment), "prod"))

        for vpc_resource in vpc.values():
            t.add_resource(vpc_resource)

        for output in outputs.values():
            t.add_output(output)

        # Console presentation: group/order the input parameters.
        groups = [
            ("Availability Zones",
             ["AvailabilityZoneA", "AvailabilityZoneB"]),
            ("Environment", ["Environment"]),
            ("Domain", ["DomainName"]),
            ("NAT Gateway", ["DeployNATGateways"]),
            ("VPC", ["VPCCIDR"]),
            ("Private Subnets",
             ["GeneralPrivateSubnetACIDR", "GeneralPrivateSubnetBCIDR"]),
            ("Shared Services Public Subnets",
             ["SharedServicesPublicSubnetACIDR",
              "SharedServicesPublicSubnetBCIDR"]),
            ("Shared Services Private Subnets",
             ["SharedServicesPrivateSubnetACIDR",
              "SharedServicesPrivateSubnetBCIDR"]),
            ("Load Balancer Subnets",
             ["LBSubnetACIDR", "LBSubnetBCIDR"]),
        ]
        t.add_metadata({
            "AWS::CloudFormation::Interface": {
                "ParameterGroups": [
                    {
                        "Label": {"default": label},
                        "Parameters": names
                    }
                    for label, names in groups
                ]
            }
        })
              Description=
              'The number of cache nodes that the cache cluster should have.',
              Type='Number',
              Default=1,
              MaxValue=16,
              MinValue=1))

#
# Mapping
#

#
# Condition
#

# Only create our own security group when none was supplied.
no_sg_supplied = Equals(Ref(param_sg), '')
t.add_condition('CreateSecurityGroupCondition', no_sg_supplied)

#
# Resources
#

cache_sg = t.add_resource(
    ec2.SecurityGroup(
        'CacheSecurityGroup',
        Condition='CreateSecurityGroupCondition',
        VpcId=Ref(param_vpcid),
        GroupDescription='Enable cache access',
        SecurityGroupIngress=[
            ec2.SecurityGroupRule(
                IpProtocol='tcp',
                FromPort='6379',
    Parameter(
        "S3BucketParameter",
        Type="String",
        Description=
        "Name of the S3 bucket where you uploaded the source code zip",
    ))

# Name of the zip file (inside the S3 bucket) holding the source code.
source_zip_parameter = template.add_parameter(Parameter(
    "SourceZipParameter",
    Description="Name of the zip file inside the S3 bucket",
    Type="String",
    Default="backup-rds.zip",
))

# True when no explicit database list was supplied (the joined list is
# empty), i.e. operate on all databases.
template.add_condition("UseAllDatabases",
                       Equals(Join("", Ref(databases_to_use_parameter)), ""))
# NOTE(review): this condition is true when the KMS key parameter is
# EMPTY, which reads as the opposite of the name "UseEncryption" —
# confirm how consumers of this condition interpret it.
template.add_condition(
    "UseEncryption",
    Equals(Ref(kms_key_parameter), ""),
)
# Aurora clusters are only included when explicitly requested.
template.add_condition("IncludeAurora",
                       Equals(Ref(include_aurora_clusters_parameter), "Yes"))

template.add_metadata({
    "AWS::CloudFormation::Interface": {
        "ParameterGroups": [
            {
                "Label": {
                    "default": "Basic configuration"
                },
                "Parameters": [
#
# Parameters
#
# Optional explicit bucket name; empty string lets CloudFormation
# generate one.
param_bucket_name = t.add_parameter(
    Parameter(
        'BucketName',
        Type='String',
        Default='',
        Description='Bucket name',
        # Lowercase letters, digits, dots and hyphens only.
        AllowedPattern=r'[-\.a-z0-9]*',
    ))

#
# Condition
#
# True whenever an explicit bucket name was provided.
bucket_name_given = Not(Equals(Ref(param_bucket_name), ''))
t.add_condition('HasBucketName', bucket_name_given)
#
# Resource
#

bucket = t.add_resource(
    s3.Bucket(
        'Bucket',
        BucketName=If('HasBucketName', Ref(param_bucket_name),
                      Ref(AWS_NO_VALUE)),
        LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
            # Add a rule to
            s3.LifecycleRule(
                # Rule attributes
                Id='S3BucketRule1',
                Prefix='',
    MinValue="5",
    MaxValue="10000",
    ConstraintDescription="should be between 5 and 10000"
))

# Provisioned write throughput for the table (5..10000 capacity units).
writeunits = template.add_parameter(Parameter(
    "WriteCapacityUnits",
    Type="Number",
    Default="10",
    Description="Provisioned write throughput",
    MinValue="5",
    MaxValue="10000",
    ConstraintDescription="should be between 5 and 10000",
))

# True when the on-demand parameter is the literal string "true".
on_demand_requested = Equals(Ref(on_demand), "true")
template.add_condition("OnDemand", on_demand_requested)

# Attribute name used as the table's hash (partition) key.
hashkeyname = template.add_parameter(Parameter(
    "HashKeyElementName",
    Description="HashType PrimaryKey Name",
    Type="String",
    AllowedPattern="[a-zA-Z0-9]*",
    MinLength="1",
    MaxLength="2048",
    # Fix: corrected the user-facing typo "alphanumberic" in the
    # constraint message shown to operators on validation failure.
    ConstraintDescription="must contain only alphanumeric characters"
))

hashkeytype = template.add_parameter(Parameter(
    "HashKeyElementType",
    Description="HashType PrimaryKey Type",
    Type="String",