Esempio n. 1
0
 def test_tg_healthcheck_port_ref(self):
     """tg_healthcheck_port must accept a Ref to a Parameter without raising."""
     port_param = Parameter('myport')
     tg_healthcheck_port(Ref(port_param))
Esempio n. 2
0
                 Statement=[
                     awacs.aws.Statement(
                         Effect=awacs.aws.Allow,
                         Action=[awacs.sts.AssumeRole],
                         Principal=awacs.aws.Principal(
                             "Service", "apigateway.amazonaws.com"),
                     )
                 ]),
             ManagedPolicyArns=[
                 "arn:aws:iam::aws:policy/service-role/AWSLambdaRole",
             ]))

# REQUEST-type API Gateway authorizer backed by a Lambda function.  The
# caller's "Attestation" header is the identity source.
devices_authorizer = template.add_resource(
    apigateway.Authorizer(
        'DevicesApiAuthorizer',
        RestApiId=Ref(restapi),
        Name='DevicesApiAuthorizer',
        Type='REQUEST',
        # TTL of 0 disables result caching, so the Lambda runs on every call.
        AuthorizerResultTtlInSeconds=0,
        AuthorizerCredentials=GetAtt(authorizer_credentials, "Arn"),
        IdentitySource="method.request.header.Attestation",
        AuthorizerUri=lambda_invocation_arn(authorizer_lambda)))

# "/android" resource hung directly off the API's root resource.
android_resource = template.add_resource(
    apigateway.Resource('AndroidResource',
                        RestApiId=Ref(restapi),
                        PathPart='android',
                        ParentId=GetAtt(restapi, 'RootResourceId')))

root_method = template.add_resource(
    apigateway.Method('RootMethod',
Esempio n. 3
0
        Type="String",
        Description="Public Subnet CIDR",
        Default="172.18.0.0/22",
    ))

# Parameter for the private subnet's CIDR range.
private_subnet = t.add_parameter(
    Parameter(
        "PrivateSubnetCidr",
        Type="String",
        # Fixed copy-paste error: this parameter describes the *private*
        # subnet (the public one is declared above with the same text).
        Description="Private Subnet CIDR",
        Default="172.18.32.0/21",
    ))

# VPC whose CIDR comes from the VpcCidr stack parameter.
vpc = t.add_resource(ec2.VPC(
    "VPC",
    CidrBlock=Ref(vpc_cidr),
))

# Public subnet: instances launched here get a public IP automatically.
public_net = t.add_resource(
    ec2.Subnet(
        "PublicSubnet",
        CidrBlock=Ref(public_subnet),
        MapPublicIpOnLaunch=True,
        VpcId=Ref(vpc),
    ))

# NOTE(review): this block looks like an accidental merge of two snippets --
# AWS::EC2::Subnet does not take a SecurityGroupIngress property (that belongs
# on a SecurityGroup), and VpcId references the literal "VpcId" instead of the
# vpc resource created above.  Confirm against the original source before use.
private_net = t.add_resource(
    ec2.Subnet(
        "PrivateSubnet",
        CidrBlock=Ref(private_subnet),
        MapPublicIpOnLaunch=False,
        SecurityGroupIngress=[
            # SSH only from the administrator CIDR.
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="22",
                ToPort="22",
                CidrIp=PublicCidrIp,
            ),
            # Application port open to the world.
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort=ApplicationPort,
                ToPort=ApplicationPort,
                CidrIp="0.0.0.0/0",
            ),
        ],
        # Added so that it references the security group added by the ELB.
        VpcId=Ref("VpcId"),
    ))
#elb 보안그룹 추가
t.add_resource(
    ec2.SecurityGroup(
        "LoadBalancerSecurityGroup",
        GroupDescription="Web load balancer security group.",
        VpcId=Ref("VpcId"),
        SecurityGroupIngress=[
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="3000",
                ToPort="3000",
                CidrIp="0.0.0.0/0",
            ),
        ],
# EC2 user-data: install Node.js 10.x, fetch the helloworld app and its
# upstart config, then start the service.
bootstrap_script = [
    "#!/bin/bash",
    "curl --silent --location https://rpm.nodesource.com/setup_10.x | sudo bash -",
    "sudo yum -y install nodejs",
    "sudo wget http://bit.ly/2vESNuc -O /home/ec2-user/helloworld.js",
    "sudo wget http://bit.ly/2vVvT18 -O /etc/init/helloworld.conf",
    "sudo start helloworld",
]
ud = Base64(Join('\n', bootstrap_script))

# Single t2.micro running the user-data bootstrap above.
# NOTE(review): ImageId is hard-coded, so this only works in the region that
# AMI lives in; KeyName refs a parameter/keypair named "awstest" -- confirm.
t.add_resource(
    ec2.Instance(
        "instance",
        ImageId="ami-a4c7edb2",
        InstanceType="t2.micro",
        SecurityGroups=[Ref("SecurityGroup")],
        KeyName=Ref("awstest"),
        UserData=ud,
    ))

# Surface the instance's public IP as a stack output.
t.add_output(
    Output(
        "InstancePublicIp",
        Description="Public IP of our instance.",
        Value=GetAtt("instance", "PublicIp"),
    ))

t.add_output(
    Output(
        "WebUrl",
        Description="Application endpoint",
Esempio n. 6
0
        Type=SUBNET_ID,
        Description='Subnet in AZ 2 to use for Postgres Instance',
    ))

# Security group for the database, supplied by the caller as a parameter.
security_group = template.add_parameter(
    Parameter(
        'SecurityGroup',
        Type='String',
        Description='Security Group to use for Postgres',
    ))

# Subnet group spanning both AZ subnets so RDS can place the instance.
subnet_group = template.add_resource(
    DBSubnetGroup(
        'SubnetGroup',
        DBSubnetGroupDescription='Subnets group for Postgres Instance',
        SubnetIds=[Ref(subnet_az_1), Ref(subnet_az_2)]))

# Dev-sized Postgres instance; credentials come from stack parameters.
# NOTE(review): PubliclyAccessible=True exposes the DB to the internet
# (gated only by the security group) -- confirm this is intended for dev.
rds_postgres = template.add_resource(
    DBInstance('DB',
               DBInstanceIdentifier='dev-db',
               DBName='dev',
               DBInstanceClass='db.t2.micro',
               AllocatedStorage='10',
               Engine='postgres',
               EngineVersion='9.6.3',
               MasterUsername=Ref(master_user),
               MasterUserPassword=Ref(master_user_password),
               DBSubnetGroupName=Ref(subnet_group),
               VPCSecurityGroups=[Ref(security_group)],
               PubliclyAccessible=True))
Esempio n. 7
0
    def create_rds_instance(self):
        """Create the Postgres RDS instance plus its security group, subnet
        group and parameter group; return the DBInstance resource."""
        rds_security_group_name = 'sgDatabaseServer'

        # Postgres traffic (in and out) restricted to addresses in the VPC.
        rds_security_group = self.add_resource(
            ec2.SecurityGroup(
                rds_security_group_name,
                GroupDescription='Enables access to database servers',
                VpcId=Ref(self.vpc_id),
                SecurityGroupIngress=[
                    ec2.SecurityGroupRule(IpProtocol='tcp',
                                          CidrIp=VPC_CIDR,
                                          FromPort=p,
                                          ToPort=p) for p in [POSTGRESQL]
                ],
                SecurityGroupEgress=[
                    ec2.SecurityGroupRule(IpProtocol='tcp',
                                          CidrIp=VPC_CIDR,
                                          FromPort=p,
                                          ToPort=p) for p in [POSTGRESQL]
                ],
                Tags=self.get_tags(Name=rds_security_group_name)))

        rds_subnet_group_name = 'dbsngDatabaseServer'

        rds_subnet_group = self.add_resource(
            rds.DBSubnetGroup(rds_subnet_group_name,
                              DBSubnetGroupDescription=
                              'Private subnets for the RDS instances',
                              SubnetIds=Ref(self.private_subnets),
                              Tags=self.get_tags(Name=rds_subnet_group_name)))

        # Log any SQL statement that runs longer than 500 ms.
        rds_parameter_group = self.add_resource(
            rds.DBParameterGroup(
                'dbpgDatabaseServer',
                Family='postgres9.4',
                Description='Parameter group for the RDS instances',
                Parameters={'log_min_duration_statement': '500'}))

        rds_database_name = 'DatabaseServer'

        # Multi-AZ Postgres instance; class, name and credentials come from
        # stack parameters exposed through self.*.
        return self.add_resource(
            rds.DBInstance(
                rds_database_name,
                AllocatedStorage=128,
                AllowMajorVersionUpgrade=False,
                AutoMinorVersionUpgrade=True,
                BackupRetentionPeriod=30,
                DBInstanceClass=Ref(self.rds_instance_type),
                DBName=Ref(self.rds_db_name),
                DBParameterGroupName=Ref(rds_parameter_group),
                DBSubnetGroupName=Ref(rds_subnet_group),
                Engine='postgres',
                EngineVersion='9.4.1',
                MasterUsername=Ref(self.rds_username),
                MasterUserPassword=Ref(self.rds_password),
                MultiAZ=True,
                PreferredBackupWindow='04:00-04:30',  # 12:00AM-12:30AM ET
                PreferredMaintenanceWindow=
                'sun:04:30-sun:05:30',  # SUN 12:30AM-01:30AM ET
                StorageType='gp2',
                VPCSecurityGroups=[Ref(rds_security_group)],
                Tags=self.get_tags(Name=rds_database_name)))
Esempio n. 8
0
from . import USE_ECS, USE_GOVCLOUD
from .security_groups import load_balancer_security_group
from .template import template
from .utils import ParameterWithDefaults as Parameter
from .vpc import public_subnet_a, public_subnet_b

# Web worker

if USE_ECS:
    web_worker_port = Ref(
        template.add_parameter(
            Parameter(
                "WebWorkerPort",
                Description="Web worker container exposed port",
                Type="Number",
                Default="8000",
            ),
            group="Load Balancer",
            label="Web Worker Port",
        ))
else:
    # default to port 80 for EC2 and Elastic Beanstalk options
    web_worker_port = Ref(
        template.add_parameter(
            Parameter(
                "WebWorkerPort",
                Description="Default web worker exposed port (non-HTTPS)",
                Type="Number",
                Default="80",
            ),
Esempio n. 9
0
def create_cache_cluster(stack, name, cache_type, vpc, cidrs, subnet_ids,
                         instance_type, num_cache_clusters):
    """Add an Elasticache cluster ('redis' or 'memcached') to the stack.

    Creates a security group admitting the given CIDRs on the engine's
    well-known port, a cache subnet group, and either a ReplicationGroup
    (num_cache_clusters > 1) or a single-node CacheCluster.
    """
    ports = {'redis': 6379, 'memcached': 11211}
    # CloudFormation logical IDs may not contain '-'; sanitize once up front
    # instead of repeating name.replace() for every resource.
    res_name = name.replace('-', '')

    # One ingress rule per allowed CIDR, on the engine's port.
    ingress = [
        SecurityGroupRule(
            '{0}{1}{2}'.format(res_name, cache_type, idx),
            CidrIp=cidr,
            FromPort=ports[cache_type],
            ToPort=ports[cache_type],
            IpProtocol='tcp',
        ) for idx, cidr in enumerate(cidrs)
    ]

    secgroup = stack.stack.add_resource(
        SecurityGroup(
            '{0}{1}SecurityGroup'.format(res_name, cache_type),
            GroupDescription='{0} {1} Security Group'.format(name, cache_type),
            SecurityGroupIngress=ingress,
            SecurityGroupEgress=[
                SecurityGroupRule(
                    '{0}egress'.format(res_name),
                    CidrIp='0.0.0.0/0',
                    IpProtocol='-1')
            ],
            VpcId=vpc,
        ))

    subnet_group = stack.stack.add_resource(
        elasticache.SubnetGroup(
            '{0}{1}cache'.format(res_name, cache_type),
            Description='{0}{1} cache'.format(name, cache_type),
            SubnetIds=subnet_ids,
        ))

    # NOTE(review): the two branches pin different engine versions (3.2.6 vs
    # 3.2.10) -- confirm the divergence is intentional.
    if num_cache_clusters > 1:
        # Multi-node: replication group with encryption at rest enabled.
        stack.stack.add_resource(
            elasticache.ReplicationGroup(
                '{0}CacheCluster'.format(res_name),
                # Redundant "'{0}'.format(x)" wrappers on plain strings removed.
                ReplicationGroupId=name,
                ReplicationGroupDescription='{0}cluster'.format(name),
                Engine=cache_type,
                EngineVersion='3.2.6',
                CacheNodeType=instance_type,
                NumCacheClusters=num_cache_clusters,
                CacheSubnetGroupName=Ref(subnet_group),
                SecurityGroupIds=[Ref(secgroup)],
                AtRestEncryptionEnabled=True))
    else:
        # Single node: plain cache cluster.
        stack.stack.add_resource(
            elasticache.CacheCluster(
                '{0}CacheCluster'.format(res_name),
                ClusterName=name,
                Engine=cache_type,
                EngineVersion='3.2.10',
                CacheNodeType=instance_type,
                NumCacheNodes=num_cache_clusters,
                VpcSecurityGroupIds=[Ref(secgroup)],
                CacheSubnetGroupName=Ref(subnet_group)))
Esempio n. 10
0
    def create_bastion(self):
        """Create the bastion host and its security group; return the
        ec2.Instance resource."""
        bastion_security_group_name = 'sgBastion'

        # Ingress: admin tools + SSH from the operator CIDR, plus
        # metrics/logging ports from inside the VPC (TCP and UDP statsite).
        bastion_security_group = self.add_resource(
            ec2.SecurityGroup(
                bastion_security_group_name,
                GroupDescription='Enables access to the BastionHost',
                VpcId=Ref(self.vpc_id),
                SecurityGroupIngress=[
                    ec2.SecurityGroupRule(IpProtocol='tcp',
                                          CidrIp=Ref(self.ip_access),
                                          FromPort=p,
                                          ToPort=p)
                    for p in [GRAPHITE_WEB, KIBANA, SSH]
                ] + [
                    ec2.SecurityGroupRule(IpProtocol='tcp',
                                          CidrIp=VPC_CIDR,
                                          FromPort=p,
                                          ToPort=p)
                    for p in [GRAPHITE, RELP, STATSITE]
                ] + [
                    ec2.SecurityGroupRule(IpProtocol='udp',
                                          CidrIp=VPC_CIDR,
                                          FromPort=p,
                                          ToPort=p) for p in [STATSITE]
                ],
                # Egress: internal DB/cache/SSH inside the VPC, plus outbound
                # HTTP/HTTPS to anywhere.
                SecurityGroupEgress=[
                    ec2.SecurityGroupRule(IpProtocol='tcp',
                                          CidrIp=VPC_CIDR,
                                          FromPort=p,
                                          ToPort=p)
                    for p in [POSTGRESQL, REDIS, SSH]
                ] + [
                    ec2.SecurityGroupRule(IpProtocol='tcp',
                                          CidrIp=ALLOW_ALL_CIDR,
                                          FromPort=p,
                                          ToPort=p) for p in [HTTP, HTTPS]
                ],
                Tags=self.get_tags(Name=bastion_security_group_name)))

        bastion_host_name = 'BastionHost'

        # Bastion instance with a 256 GB gp2 root volume and a public IP on
        # the first public subnet.
        return self.add_resource(
            ec2.Instance(bastion_host_name,
                         BlockDeviceMappings=[{
                             "DeviceName": "/dev/sda1",
                             "Ebs": {
                                 "VolumeType": "gp2",
                                 "VolumeSize": "256"
                             }
                         }],
                         InstanceType=Ref(self.bastion_instance_type),
                         KeyName=Ref(self.keyname),
                         ImageId=Ref(self.bastion_host_ami),
                         NetworkInterfaces=[
                             ec2.NetworkInterfaceProperty(
                                 Description='ENI for BastionHost',
                                 GroupSet=[Ref(bastion_security_group)],
                                 SubnetId=Select("0",
                                                 Ref(self.public_subnets)),
                                 AssociatePublicIpAddress=True,
                                 DeviceIndex=0,
                                 DeleteOnTermination=True)
                         ],
                         Tags=self.get_tags(Name=bastion_host_name)))
Esempio n. 11
0
    Type="String",
    Description="Bucket for lambda zip file"
))

lambda_package = t.add_parameter(Parameter(
    "LambdaPackage",
    Type="String",
    Description="Location of the zip"
))

template_bucket = t.add_resource(s3.Bucket("TemplateBucket"))

# Create loggroup
log_group = t.add_resource(logs.LogGroup(
    "LogGroup",
    LogGroupName=Join("", ["/aws/lambda/", Join("-", ["lambda", Ref("AWS::StackName")])]),
    RetentionInDays=14
))

lambda_role = t.add_resource(iam.Role(
    "LambdaRole",
    AssumeRolePolicyDocument=Policy(
        Version="2012-10-17",
        Statement=[
            Statement(
                Effect=Allow,
                Principal=Principal("Service", "lambda.amazonaws.com"),
                Action=[Action("sts", "AssumeRole")]
            )
        ]),
    Path="/",
                     "Principal": {
                         "Service": ["ec2.amazonaws.com"]
                     },
                     "Action": ["sts:AssumeRole"]
                 }]
             },
             Policies=[
                 iam.Policy(
                     PolicyName='{}InstancePolicy'.format(STACK_NAME),
                     PolicyDocument=instance_policy_doc,
                 ),
             ]))

instance_profile = template.add_resource(
    iam.InstanceProfile('InstanceProfile',
                        Roles=[Ref(instance_role)],
                        InstanceProfileName='TweeterUploaderInstanceProfile'))

# Define Instance Metadata
instance_metadata = Metadata(
    Init({
        'config':
        InitConfig(
            commands={'update_yum_packages': {
                'command': 'yum update -y'
            }},
            files=InitFiles({
                # setup .bashrc ec2-user
                '/home/ec2-user/.bashrc':
                InitFile(content=Join(
                    '',
import troposphere.iam as iam
from troposphere import Base64, Select, FindInMap, GetAtt, Join
from troposphere import Template, Condition, Equals, And, Or, Not, If
from troposphere import Parameter, Ref, Tags, Template, Output
from troposphere.autoscaling import LaunchConfiguration, AutoScalingGroup
from troposphere.policies import CreationPolicy, ResourceSignal

# things you may want to change
ref_disk_all_root_volumesize = "100"
ref_disk_master_ebs_diskcount = 2
ref_disk_master_ebs_volumesize = "500"
ref_disk_worker_ebs_diskcount = 9
ref_disk_worker_ebs_volumesize = "1000"

# Don't touch these
ref_stack_id = Ref('AWS::StackId')
ref_region = Ref('AWS::Region')
ref_stack_name = Ref('AWS::StackName')
ref_ambariserver = GetAtt('AmbariNode',
                        'PrivateDnsName')
ref_java_provider = Ref('JavaProvider')


# now the work begins
t = Template()

t.add_version("2010-09-09")

t.add_description("""\
CloudFormation template to Deploy Hortonworks Data Platform on VPC with a public subnet""")
        policy.PolicyDocument = transform_policy_document(policy_document)
        self.Policies.append(policy)


if __name__ == '__main__':
    # Build a template exposing a cross-account S3 role and print it as JSON.
    template = Template()

    account_param = template.add_parameter(
        Parameter(
            "AccountNumber",
            Description=(
                "The name of the account number for the cross account Role"),
            Type="String",
        ))

    # Rack IAM is able to handle class like Troposphere functions such
    # as Ref() and Join()
    role = MyRole("CrossAccountS3Role")
    role.add_account_assume_policy(
        Join(":", ["arn", "aws", "iam", "", Ref(account_param), "root"]))
    role.add_policy("CrossAccountS3Policy", "Allow", ["s3:*"])

    s3_role = template.add_resource(role)
    template.add_output(
        Output("RoleArn",
               Description="ARN of the role",
               Value=GetAtt(s3_role, "Arn")))

    # Sample output is included in `iam_troposphere_integration.template`
    print(template.to_json())
Esempio n. 15
0
    def create_rds_cloudwatch_alarms(self, rds_database):
        """Create four CloudWatch alarms for the given RDS instance (CPU,
        disk queue depth, free storage, freeable memory), each notifying
        self.notification_topic_arn.

        Refactored from four near-identical copy-pasted Alarm() calls into a
        single data-driven loop; parameters are unchanged.
        """
        # (logical id, description, metric, period s, threshold, comparison)
        alarm_specs = [
            ('alarmDatabaseServerCPUUtilization',
             'Database server CPU utilization',
             'CPUUtilization', 300, '75', 'GreaterThanThreshold'),
            ('alarmDatabaseServerDiskQueueDepth',
             'Database server disk queue depth',
             'DiskQueueDepth', 60, '10', 'GreaterThanThreshold'),
            ('alarmDatabaseServerFreeStorageSpace',
             'Database server free storage space',
             'FreeStorageSpace', 60,
             str(int(5.0e+09)),  # 5GB in bytes
             'LessThanThreshold'),
            ('alarmDatabaseServerFreeableMemory',
             'Database server freeable memory',
             'FreeableMemory', 60,
             str(int(1.28e+08)),  # 128MB in bytes
             'LessThanThreshold'),
        ]
        for (title, description, metric, period, threshold,
             comparison) in alarm_specs:
            self.add_resource(
                cloudwatch.Alarm(
                    title,
                    AlarmDescription=description,
                    AlarmActions=[Ref(self.notification_topic_arn)],
                    Statistic='Average',
                    Period=period,
                    Threshold=threshold,
                    EvaluationPeriods=1,
                    ComparisonOperator=comparison,
                    MetricName=metric,
                    Namespace='AWS/RDS',
                    Dimensions=[
                        cloudwatch.MetricDimension('metricDatabaseServerName',
                                                   Name='DBInstanceIdentifier',
                                                   Value=Ref(rds_database))
                    ],
                ))
Esempio n. 16
0
from troposphere.cloudformation import AWSCustomObject


class CustomPlacementGroup(AWSCustomObject):
    """Lambda-backed custom resource that creates an EC2 placement group."""

    resource_type = "Custom::PlacementGroup"

    # 'basestring' exists only on Python 2; 'str' gives the same validation
    # behavior on Python 3.
    props = {
        'ServiceToken': (str, True),
        'PlacementGroupName': (str, True)
    }


t = Template()

# The adjacent string literals previously concatenated with no separators,
# rendering as "...might lookFor information...see:http://...".  Punctuation
# and spaces added so the emitted description reads correctly.
t.set_description(
    "Example template showing how a Lambda Function CustomResource might "
    "look. For information on AWS Lambda-backed Custom Resources see: "
    "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/"
    "template-custom-resources-lambda.html"
)

# Invoke the cfnPlacementGroup Lambda in this account/region to create the
# placement group.
placementgroup_a = t.add_resource(CustomPlacementGroup(
    "ClusterGroup",
    ServiceToken=Join("", ["arn:aws:lambda:", Ref("AWS::Region"), ":",
                           Ref("AWS::AccountId"),
                           ":function:cfnPlacementGroup"]),
    PlacementGroupName="ExampleClusterGroup",
))

print(t.to_json())
Esempio n. 17
0
def emit_configuration():
    """Emit the Docker-registry stack pieces into the shared `template`:
    an optional S3 bucket, security group, IAM role + instance profile,
    launch configuration, and a one-instance autoscaling group."""
    vpc = cfn.vpcs[0]

    instance_class = template.add_parameter(
        Parameter(
            'RegistryInstanceType',
            Type='String',
            Default='m3.medium',
            Description='Registry instance type',
            AllowedValues=cfn.usable_instances(),
        ))

    create_bucket = template.add_parameter(
        Parameter(
            'CreateDockerRegistryBucket',
            Type='String',
            Description='Whether or not to create the Docker Registry bucket.',
            Default='no',
            AllowedValues=['yes', 'no']))

    condition_name = "DockerRegistryBucketCondition"
    conditions = {condition_name: Equals(Ref(create_bucket), "yes")}

    # Iterate key/value pairs directly instead of re-indexing the dict
    # inside the loop.
    for cname, condition in conditions.items():
        template.add_condition(cname, condition)

    # Create the registry bucket (kept on stack deletion via Retain).
    bucket_name = Join(
        '.',
        ['docker-registry', CLOUDNAME,
         Ref("AWS::Region"), CLOUDENV, 'leafme'])
    bucket = template.add_resource(
        Bucket("DockerRegistryBucket",
               BucketName=bucket_name,
               DeletionPolicy='Retain',
               Condition=condition_name))

    # HTTP and SSH from the default route CIDR.
    ingress_rules = [
        SecurityGroupRule(IpProtocol=p[0],
                          CidrIp=DEFAULT_ROUTE,
                          FromPort=p[1],
                          ToPort=p[1]) for p in [('tcp', 80), ('tcp', 22)]
    ]

    sg = template.add_resource(
        SecurityGroup("DockerRegistry",
                      GroupDescription="Security Group for Docker Registries",
                      VpcId=Ref(vpc),
                      SecurityGroupIngress=ingress_rules,
                      DependsOn=vpc.title))

    policy_vars = {"env": CLOUDENV, "cloud": CLOUDNAME, "region": "us-east-1"}
    # IAM role for docker registry
    policy = json.loads(
        cfn.load_template("registry_policy.json.j2", policy_vars))

    default_policy = json.loads(
        cfn.load_template("default_policy.json.j2", policy_vars))

    iam_role = template.add_resource(
        Role("DockerRegistryIamRole",
             AssumeRolePolicyDocument=ASSUME_ROLE_POLICY,
             Path="/",
             Policies=[
                 Policy(PolicyName="RegistryDefaultPolicy",
                        PolicyDocument=default_policy),
                 Policy(PolicyName="RegistryPolicy", PolicyDocument=policy)
             ],
             DependsOn=vpc.title))

    instance_profile = template.add_resource(
        InstanceProfile("DockerRegistryInstanceProfile",
                        Path="/",
                        Roles=[Ref(iam_role)],
                        DependsOn=iam_role.title))

    user_data = cfn.load_template("default-init.bash.j2", {
        "env": CLOUDENV,
        "cloud": CLOUDNAME,
        "deploy": "docker_registry"
    })

    launch_config = template.add_resource(
        LaunchConfiguration("RegistryLaunchConfiguration",
                            ImageId=FindInMap('RegionMap', Ref("AWS::Region"),
                                              int(cfn.Amis.INSTANCE)),
                            InstanceType=Ref(instance_class),
                            IamInstanceProfile=Ref(instance_profile),
                            KeyName=Ref(cfn.keyname),
                            SecurityGroups=[Ref(sg)],
                            DependsOn=[instance_profile.title, sg.title],
                            AssociatePublicIpAddress=False,
                            UserData=Base64(user_data)))

    # Fixed-size (1 instance) autoscaling group on the platform subnets.
    asg = template.add_resource(
        AutoScalingGroup(
            "RegistryAutoscalingGroup",
            AvailabilityZones=cfn.get_asg_azs(),
            DesiredCapacity="1",
            LaunchConfigurationName=Ref(launch_config),
            MinSize="1",
            MaxSize="1",
            NotificationConfiguration=NotificationConfiguration(
                TopicARN=Ref(cfn.alert_topic),
                NotificationTypes=[
                    EC2_INSTANCE_TERMINATE, EC2_INSTANCE_LAUNCH,
                    EC2_INSTANCE_LAUNCH_ERROR, EC2_INSTANCE_TERMINATE_ERROR
                ]),
            VPCZoneIdentifier=[
                Ref(sn)
                for sn in cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.PLATFORM)
            ],
            DependsOn=[
                sn.title
                for sn in cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.PLATFORM)
            ]))
Esempio n. 18
0
                        awacs.aws.Statement(
                            Effect=awacs.aws.Deny,
                            Action=[awacs.aws.Action("sqs", "*")],
                            NotResource=[GetAtt("myqueue", "Arn")],
                        ),
                    ],
                ),
            ),
            Policy(
                PolicyName="giveaccesstotopiconly",
                PolicyDocument=awacs.aws.PolicyDocument(
                    Statement=[
                        awacs.aws.Statement(
                            Effect=awacs.aws.Allow,
                            Action=[awacs.aws.Action("sns", "*")],
                            Resource=[Ref("mytopic")],
                        ),
                        awacs.aws.Statement(
                            Effect=awacs.aws.Deny,
                            Action=[awacs.aws.Action("sns", "*")],
                            NotResource=[Ref("mytopic")],
                        ),
                    ],
                ),
            ),
        ],
    )
)

print(t.to_json())
Esempio n. 19
0
    'BucketName',
    Type='String',
    Description='Lambda Code Bucket'
))

# Cache-busting token: the Lambda zip key embeds the upload timestamp.
time_token = t.add_parameter(Parameter(
    'TimeToken',
    Type='String',
    Description='Time Token for last upload'
))

# ReDS Lambda function, loaded from s3://<BucketName>/reds-<TimeToken>.zip.
# NOTE(review): Runtime python2.7 is long deprecated on AWS Lambda.
lambda_function = t.add_resource(
    awslambda.Function(
        "reds",
        Code=awslambda.Code(
            S3Bucket=Ref(bucket_name),
            S3Key=Join("",["reds-",Ref(time_token),".zip"])
        ),
        Handler="reds.lambda_handler",
        MemorySize=128,
        Role=Join('',['arn:aws:iam::',Ref("AWS::AccountId"),':role/',Ref(lambda_role)]),
        Runtime="python2.7",
        Timeout=30
    )
)

# Fixed mismatched brackets: the original opened a list ("add_output([") but
# closed with "))", which is a SyntaxError.  A single Output needs no list.
t.add_output(
    Output(
        'LambdaFunction',
        Description='ReDS Lambda Function',
        Value=Ref(lambda_function),
    ))

# EC2 user-data: install Node.js from EPEL, fetch the helloworld app and its
# upstart config, then start the service.
ud = Base64(
    Join('\n', [
        "#!/bin/bash", "sudo yum install --enablerepo=epel -y nodejs",
        "wget http://bit.ly/2vESNuc -O /home/ec2-user/helloworld.js",
        "wget http://bit.ly/2vVvT18 -O /etc/init/helloworld.conf",
        "start helloworld"
    ]))

# Single t2.micro running the bootstrap above.
# NOTE(review): ImageId is hard-coded and thus region-specific.
t.add_resource(
    ec2.Instance(
        "instance",
        ImageId="ami-a4c7edb2",
        InstanceType="t2.micro",
        SecurityGroups=[Ref("SecurityGroup")],
        KeyName=Ref("KeyPair"),
        UserData=ud,
    ))

# Surface the instance's public IP as a stack output.
t.add_output(
    Output(
        "InstancePublicIp",
        Description="Public IP of our instance.",
        Value=GetAtt("instance", "PublicIp"),
    ))

t.add_output(
    Output(
        "WebUrl",
        Description="Application endpoint",
Esempio n. 21
0
# Security group admitting HTTP (port 80) from anywhere.
frontend_ec2_sg = t.add_resource(ec2.SecurityGroup(
    "rzienertHttpSecurityGroup",
    GroupDescription="Enable HTTP traffic for frontend class servers",
    SecurityGroupIngress=[
        ec2.SecurityGroupRule(
            IpProtocol="tcp",
            FromPort="80",
            ToPort="80",
            CidrIp="0.0.0.0/0"
        )
    ]
))

# Frontend instance; AMI resolved per-region via the RegionMap mapping.
# NOTE(review): UserData is the literal string "80" (base64-encoded) --
# presumably consumed by the AMI's boot script; confirm intent.
ec2_instance = t.add_resource(ec2.Instance(
    "rzienertInstance",
    ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
    InstanceType="t1.micro",
    SecurityGroups=[Ref(frontend_ec2_sg)],
    UserData=Base64("80")
))

elb = t.add_resource(elb.LoadBalancer(
    "rzienertLoadBalancer",
    AvailabilityZones=GetAZs(""),
    ConnectionDrainingPolicy=elb.ConnectionDrainingPolicy(
        Enabled=True,
        Timeout=300
    ),
    CrossZone=True,
    Instances=[Ref(ec2_instance)],
    Listeners=[
Esempio n. 22
0
# Header End

# Metadata
t.add_metadata({
    "Comments": "Perforce Helix Deployment for EC2",
    "LastUpdated": "Sep 14th 2016",
    "UpdatedBy": "Graeme Rich",
    "Version": "2016.1",
})

# Metadata End

# Conditions
# Fixed: each assignment previously ended with a stray trailing comma, which
# wrapped the add_condition() return value in a 1-tuple.
ProdNotify = t.add_condition("ProdNotify",
                             Equals(Ref("EnvironmentType"), "Production"))

# NOTE(review): "Evaluationn" looks like a typo for "Evaluation" -- left
# unchanged because the EnvironmentType parameter's AllowedValues (not
# visible here) may use the same spelling; confirm before fixing.
EvalNotify = t.add_condition("EvalNotify",
                             Equals(Ref("EnvironmentType"), "Evaluationn"))

DevNotify = t.add_condition("DevNotify",
                            Equals(Ref("EnvironmentType"), "Development"))
# Exp josn
# "Conditions": {
#     "ProdNotify": {
#       "Fn::Equals": [
#         {
#           "Ref": "EnvironmentType"
#         },
#         "Production"
#       ]
    "ClassB",
    Type="Number",
    Description="Class B of VPC (10.XXX.0.0/16)",
    Default="0",
    MinValue=0,
    MaxValue=255,
    ConstraintDescription="Must be in the range [0-255]",
))

# VPC with DNS enabled; CIDR is 10.<ClassB>.0.0/16 from the ClassB parameter.
t.add_resource(VPC(
    "VPC",
    EnableDnsSupport="true",
    EnableDnsHostnames="true",
    CidrBlock=Sub('10.${ClassB}.0.0/16'),
    Tags=Tags(
        Name=Ref("AWS::StackName"),
    )
))

# Internet gateway for public egress/ingress.
t.add_resource(InternetGateway(
    "InternetGateway",
    Tags=Tags(
        Name=Ref("AWS::StackName"),
    )
))

# Attach the gateway to the VPC (logical ID says "VPN" but this attaches the
# internet gateway above).
t.add_resource(VPCGatewayAttachment(
    "VPNGatewayAttachment",
    VpcId=Ref("VPC"),
    InternetGatewayId=Ref("InternetGateway")
))
    GroupDescription="Allow SSH and private network access", 
    SecurityGroupIngress=[ 
        ec2.SecurityGroupRule( 
            IpProtocol="tcp", 
            FromPort=0, 
            ToPort=65535, 
            CidrIp="172.16.0.0/12", 
        ), 
        ec2.SecurityGroupRule( 
            IpProtocol="tcp", 
            FromPort="22", 
            ToPort="22", 
            CidrIp=PublicCidrIp, 
        ), 
    ], 
    VpcId=Ref("VpcId") 
)) 
t.add_resource(Cluster( 
    'ECSCluster', 
)) 
t.add_resource(Role(
    'EcsClusterRole',
    ManagedPolicyArns=[
        'arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforSSM',
        'arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly',
        'arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role',
        'arn:aws:iam::aws:policy/CloudWatchFullAccess'
    ],
    AssumeRolePolicyDocument={
        'Version': '2012-10-17',
        'Statement': [{
Esempio n. 25
0
#
# Parameters
#
#
# Parameters
#
# Optional bucket name; empty string means "let CloudFormation generate one"
# (see the HasBucketName condition below).
param_bucket_name = t.add_parameter(
    Parameter(
        'BucketName',
        Description='Bucket name',
        Default='',
        Type='String',
        AllowedPattern=r'[-\.a-z0-9]*',
    ))

#
# Condition
#
t.add_condition('HasBucketName', Not(Equals(Ref(param_bucket_name), '')))

t.add_condition('ChinaRegionCondition', Equals(Ref(AWS_REGION), 'cn-north-1'))
#
# Mapping
#
# Region -> ELB account ID mapping, loaded from a CSV shipped with the repo.
t.add_mapping(
    'ElbAccountId',
    cfnutil.load_csv_as_mapping('mapping/elb-account-id.csv', 'Region', 'ID',
                                'Elastic Load Balancing Account ID'))

#
# Resource
#
bucket = t.add_resource(
    s3.Bucket(
Esempio n. 26
0
 def test_network_port_ref(self):
     p = Parameter('myport')
     network_port(Ref(p))
Esempio n. 27
0
 def test_helperfun(self):
     FakeAWSObject('fake', helperfun=Ref('fake_ref'))
Esempio n. 28
0
from .assets import assets_management_policy
from .common import container_instance_type
from .load_balancer import load_balancer, web_worker_health_check
from .logs import logging_policy
from .security_groups import container_security_group
from .template import template
from .vpc import container_a_subnet, container_b_subnet

# "AMI" parameter for the application-server instances; the Ref is captured
# so downstream resources can consume the parameter value directly.
ami = Ref(
    template.add_parameter(
        Parameter(
            "AMI",
            Type="String",
            Default="",
            Description=(
                "The Amazon Machine Image (AMI) to use for instances. Make "
                "sure to use the correct AMI for your region and instance "
                "type (t2 instances require HVM AMIs)."
            ),
        ),
        group="Application Server",
        label="Amazon Machine Image (AMI)",
    ))

key_name = template.add_parameter(
    Parameter(
        "KeyName",
        Description="Name of an existing EC2 KeyPair to enable SSH access to "
        "the AWS EC2 instances",
        Type="AWS::EC2::KeyPair::KeyName",
        ConstraintDescription="must be the name of an existing EC2 KeyPair."),
    group="Application Server",
Esempio n. 29
0
 def test_ref(self):
     # A Ref to a parameter serialises to {"Ref": <logical id>}.
     source = Parameter("param", Description="description", Type="String")
     rendered = Ref(source).to_dict()
     self.assertEqual(rendered['Ref'], 'param')
Esempio n. 30
0
 def test_ref(self):
     # The dict form of a Ref must point at the parameter's logical id.
     my_param = Parameter("param", Description="description", Type="String")
     as_dict = Ref(my_param).to_dict()
     self.assertEqual(as_dict['Ref'], 'param')
Esempio n. 31
0
    def __init__(self, key_title, key_rotation, key_admins, key_users, template):
        """
        Create a Customer Master Key in KMS for encryption and use with credstash.

        AWS - http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kms-key.html
        Troposphere - https://github.com/cloudtools/troposphere/blob/master/troposphere/kms.py

        :param key_title: String title of the key in AWS, not the alias name, must be alphanumeric
        :param key_rotation: Boolean to enable or disable key rotation at a cost
        :param key_admins: single string or list of ARNs of IAM objects to apply the key admin policy to
        :param key_users: single string or list of ARNs of IAM objects to apply the key user policy to
        :param template: The troposphere template to add the KMS key to.
        """
        # Key policy: admins may manage the key's lifecycle, users may perform
        # cryptographic operations with it.
        k_key_policy = {
            "Version": "2012-10-17",
            "Id": "key-default-1",
            "Statement": [
                {
                    "Sid": "Allow administration of the key",
                    "Effect": "Allow",
                    "Principal": {"AWS": key_admins},
                    "Action": [
                        "kms:Create*",
                        "kms:Describe*",
                        "kms:Enable*",
                        "kms:List*",
                        "kms:Put*",
                        "kms:Update*",
                        "kms:Revoke*",
                        "kms:Disable*",
                        "kms:Get*",
                        "kms:Delete*",
                        "kms:ScheduleKeyDeletion",
                        "kms:CancelKeyDeletion"
                    ],
                    "Resource": "*"
                },
                {
                    "Sid": "Allow use of the key",
                    "Effect": "Allow",
                    "Principal": {"AWS": key_users},
                    "Action": [
                        "kms:Encrypt",
                        "kms:Decrypt",
                        # Was "kms:ReEncrypt", which is not a valid KMS action;
                        # the wildcard matches kms:ReEncryptFrom/kms:ReEncryptTo
                        # as in AWS's default key policy.
                        "kms:ReEncrypt*",
                        "kms:GenerateDataKey*",
                        "kms:DescribeKey"
                    ],
                    "Resource": "*"
                }
            ]
        }

        # Create Key Resource
        self.k_key = template.add_resource(Key(key_title,
                                               Description=Join('', [key_title,
                                                                     ' on Stack: ',
                                                                     Ref('AWS::StackName')]),
                                               Enabled=True,
                                               EnableKeyRotation=key_rotation,
                                               KeyPolicy=k_key_policy))

        # Add Output. EnableKeyRotation is documented as a Boolean, so it is
        # converted via str() before .upper(); the original called .upper()
        # directly, which raises AttributeError for bool inputs. str() is a
        # no-op-then-upper for callers that already passed a string.
        template.add_output(Output(
            key_title,
            Value=Join('', [Ref(self.k_key),
                            ' is a managed AWS KMS Key and Key Rotation = ',
                            str(self.k_key.EnableKeyRotation).upper(),
                            '. Created with Amazonia as part of stack name - ',
                            Ref('AWS::StackName'),
                            ]),
            Description='Amazonia KMS Key Bucket'
        ))