def main():
    """Build and print a CloudFormation template containing a NAT
    single instance inside a freshly declared VPC and subnet."""
    cfn_template = Template()
    my_vpc = ec2.VPC('MyVPC', CidrBlock='10.0.0.0/16')
    my_subnet = ec2.Subnet(
        'MySubnet',
        AvailabilityZone='ap-southeast-2a',
        VpcId=Ref(my_vpc),
        CidrBlock='10.0.1.0/24',
    )
    topic = SNS(cfn_template)
    config = SingleInstanceConfig(
        keypair='INSERT_YOUR_KEYPAIR_HERE',
        si_image_id='ami-53371f30',
        si_instance_type='t2.micro',
        vpc=Ref(my_vpc),
        subnet=Ref(my_subnet),
        is_nat=True,
        instance_dependencies=my_vpc.title,
        public_hosted_zone_name=None,
        iam_instance_profile_arn=None,
        sns_topic=topic,
        availability_zone='ap-southeast-2a',
        ec2_scheduled_shutdown=None,
        owner='*****@*****.**'
    )
    SingleInstance(title='nat1',
                   template=cfn_template,
                   single_instance_config=config)

    cfn_template.add_resource(my_vpc)
    cfn_template.add_resource(my_subnet)
    print(cfn_template.to_json(indent=2, separators=(',', ': ')))
Beispiel #2
0
def render(context):
    """Assemble the CloudFormation template for a project's EC2 stack.

    Builds the security group and instance, wires in optional RDS,
    external-volume and DNS resources based on ``context``, and returns
    the rendered template as a JSON string.

    :param context: project configuration dict; reads
        ``context['project']['aws']`` and ``context['hostname']``.
    :return: template JSON string.
    """
    secgroup = ec2_security(context)
    instance = ec2instance(context)

    template = Template()
    template.add_resource(secgroup)
    template.add_resource(instance)

    template.add_parameter(Parameter(KEYPAIR, **{
        "Type": "String",
        "Description": "EC2 KeyPair that enables SSH access to this instance",
    }))

    cfn_outputs = outputs()

    # BUG FIX: `dict.has_key` was removed in Python 3, and `map()` is lazy
    # there, so the mapped `add_resource`/`add_output` calls never actually
    # ran. Use `in` and explicit loops (identical behavior on Python 2).
    aws_config = context['project']['aws']
    if 'rds' in aws_config:
        for resource in rdsinstance(context):
            template.add_resource(resource)
        cfn_outputs.extend([
            mkoutput("RDSHost", "Connection endpoint for the DB cluster", (RDS_TITLE, "Endpoint.Address")),
            mkoutput("RDSPort", "The port number on which the database accepts connections", (RDS_TITLE, "Endpoint.Port")),])

    if 'ext' in aws_config:
        for resource in ext_volume(context):
            template.add_resource(resource)

    if context['hostname']: # None if one couldn't be generated
        template.add_resource(external_dns(context))
        template.add_resource(internal_dns(context))
        cfn_outputs.extend([
            mkoutput("DomainName", "Domain name of the newly created EC2 instance", Ref(R53_EXT_TITLE)),
            mkoutput("IntDomainName", "Domain name of the newly created EC2 instance", Ref(R53_INT_TITLE))])

    for output in cfn_outputs:
        template.add_output(output)
    return template.to_json()
Beispiel #3
0
 def test_s3_filter(self):
     """An S3 event with key prefix/suffix filter rules must serialize."""
     upload_event = S3Event(
         'FileUpload',
         Bucket="bucket",
         Events=['s3:ObjectCreated:*'],
         Filter=Filter(S3Key=S3Key(
             Rules=[
                 Rules(Name="prefix", Value="upload/"),
                 Rules(Name="suffix", Value=".txt"),
             ],
         )),
     )
     template = Template()
     template.add_resource(
         Function(
             "ProcessorFunction",
             Handler='process_file.handler',
             CodeUri='.',
             Runtime='python3.6',
             Policies='AmazonS3FullAccess',
             Events={'FileUpload': upload_event},
         )
     )
     template.to_json()
Beispiel #4
0
 def test_mutualexclusion(self):
     """Setting mutually-exclusive properties must fail at to_json time."""
     template = Template()
     fake = FakeAWSObject('fake', callcorrect=True, singlelist=[10])
     template.add_resource(fake)
     with self.assertRaises(ValueError):
         template.to_json()
Beispiel #5
0
 def test_simple_table(self):
     """A SimpleTable with only a title must serialize cleanly."""
     template = Template()
     template.add_resource(SimpleTable("SomeTable"))
     template.to_json()
def main():
    """Emit a CloudFormation template with a jump-box single instance
    inside a new VPC and subnet."""
    stack = Template()
    my_vpc = ec2.VPC('MyVPC', CidrBlock='10.0.0.0/16')
    my_subnet = ec2.Subnet(
        'MySubnet',
        AvailabilityZone='ap-southeast-2a',
        VpcId=Ref(my_vpc),
        CidrBlock='10.0.1.0/24',
    )
    config = SingleInstanceConfig(
        keypair='INSERT_YOUR_KEYPAIR_HERE',
        si_image_id='ami-dc361ebf',
        si_instance_type='t2.micro',
        vpc=Ref(my_vpc),
        subnet=Ref(my_subnet),
        instance_dependencies=my_vpc.title,
        public_hosted_zone_name=None,
        sns_topic=None,
        is_nat=False,
        iam_instance_profile_arn=None,
        availability_zone='ap-southeast-2a'
    )
    SingleInstance(title='jump',
                   template=stack,
                   single_instance_config=config)

    stack.add_resource(my_vpc)
    stack.add_resource(my_subnet)
    print(stack.to_json(indent=2, separators=(',', ': ')))
Beispiel #7
0
def sceptre_handler(sceptre_user_data):
    """Sceptre entry point: build a VPC with an attached internet
    gateway and return the rendered template JSON.

    :param sceptre_user_data: dict supplying ``cidr_block``.
    :return: template JSON string.
    """
    template = Template()

    new_vpc = template.add_resource(VPC(
        "VirtualPrivateCloud",
        CidrBlock=sceptre_user_data["cidr_block"],
        InstanceTenancy="default",
        EnableDnsSupport=True,
        EnableDnsHostnames=True,
    ))

    gateway = template.add_resource(InternetGateway("InternetGateway"))

    # Attach the gateway so instances in public subnets can reach the net.
    template.add_resource(VPCGatewayAttachment(
        "IGWAttachment",
        VpcId=Ref(new_vpc),
        InternetGatewayId=Ref(gateway),
    ))

    template.add_output(Output(
        "VpcId",
        Description="New VPC ID",
        Value=Ref(new_vpc),
    ))

    return template.to_json()
Beispiel #8
0
class CFLab:
    '''
    Class creates VPC/Subnets/ELB/ASG for Cloudformation lab
    '''

    def __init__(self, config_dictionary):
        '''
        Compose the CloudFormation template deploying the lab solution.
        @param config_dictionary [dict] collection of keyword arguments for this class implementation
        '''
        self.globals = config_dictionary.get('globals', {})
        self.template_args = config_dictionary.get('template', {})

        self.template = Template()
        self.template.description = self.globals.get('description', '')

        # Generators for the networking layer and the compute layer;
        # the EC2 generator is wired to the VPC generator's outputs.
        self.vpc_generator = VPCGenerator(self.template_args)
        self.ec2_generator = EC2Generator(
            self.template_args,
            self.vpc_generator.vpc,
            self.vpc_generator.subnets
        )

        # Collect every generated resource and output into the template.
        for resource in self.vpc_generator.resources:
            self.template.add_resource(resource)

        for resource in self.ec2_generator.resources:
            self.template.add_resource(resource)

        for output in self.ec2_generator.outputs:
            self.template.add_output(output)
 def test_api_no_definition(self):
     """An Api with only StageName set should serialize."""
     template = Template()
     template.add_resource(Api("SomeApi", StageName='test'))
     template.to_json()
Beispiel #10
0
 def test_no_required(self):
     """Serializing a Stack missing its required properties must raise."""
     template = Template()
     template.add_resource(Stack("mystack"))
     with self.assertRaises(ValueError):
         template.to_json()
Beispiel #11
0
    def test_s3_bucket_accelerate_configuration(self):
        """AccelerateConfiguration must round-trip into the JSON output."""
        bucket = Bucket(
            "s3Bucket",
            AccelerateConfiguration=AccelerateConfiguration(
                AccelerationStatus="Enabled"),
        )
        template = Template()
        template.add_resource(bucket)
        self.assertIn('"AccelerationStatus": "Enabled"', template.to_json())
def _generate_template(tms=1, within_vpc=False):
    """Build the Flink cluster CloudFormation template and return its JSON.

    :param tms: number of TaskManager instances to create.
    :param within_vpc: when True, also create VPC networking resources and
        place the instances into the resulting public/private subnets.
    :return: rendered template as a JSON string.
    """
    t = Template()

    t.add_description(FLINK_TEMPLATE_DESCRIPTION)
    t.add_version(FLINK_TEMPLATE_VERSION)
    # Record generation time in the template metadata (locale-dependent).
    t.add_metadata({'LastUpdated': datetime.datetime.now().strftime('%c')})

    # mappings
    mappings.add_mappings(t)

    # parameters
    parameters.add_parameters(t)

    # Networking is only materialized in VPC mode; otherwise the security
    # groups / instances are created with vpc=None (EC2-classic style).
    vpc = None
    subnet_pri = None
    subnet_pub = None
    if within_vpc:
        # networking resources
        vpc, subnet_pri, subnet_pub = _define_vpc(t)

    # security groups
    sg_ssh = t.add_resource(securitygroups.ssh(
        parameters.ssh_location, vpc))

    sg_jobmanager = t.add_resource(securitygroups.jobmanager(
        parameters.http_location, vpc))

    sg_taskmanager = t.add_resource(securitygroups.taskmanager(None, vpc))

    # Single JobManager (index 0); goes into the public subnet in VPC mode.
    jobmanager = t.add_resource(instances.jobmanager(
        0,
        [Ref(sg_ssh), Ref(sg_jobmanager)],
        within_vpc,
        subnet_pub
    ))

    prefix = "JobManager00"
    t.add_output(outputs.ssh_to(jobmanager, prefix))
    # Flink's web UI listens on port 8081 of the JobManager host.
    t.add_output(Output(
        "FlinkWebGui",
        Description="Flink web interface",
        Value=Join("", [
            'http://', GetAtt(jobmanager, "PublicDnsName"), ':8081'
        ])
    ))

    # TaskManagers go to the private subnet in VPC mode and are reached
    # over SSH via the JobManager acting as a bastion.
    for index in range(0, tms):
        i = t.add_resource(instances.taskmanager(
            index,
            jobmanager,
            [Ref(sg_ssh), Ref(sg_taskmanager)],
            within_vpc,
            subnet_pri
        ))
        # Zero-padded suffix, e.g. "TaskManager03".
        prefix = "TaskManager%2.2d" % index
        t.add_output(outputs.ssh_to(i, prefix, bastion=jobmanager))

    return t.to_json()
Beispiel #13
0
 def test_required_api_definitionuri(self):
     """An Api with StageName plus DefinitionUri should serialize."""
     template = Template()
     template.add_resource(Api(
         "SomeApi",
         StageName='test',
         DefinitionUri='s3://bucket/swagger.yml',
     ))
     template.to_json()
Beispiel #14
0
 def test_required_api_definitionbody(self):
     """An Api with StageName plus DefinitionBody should serialize."""
     template = Template()
     template.add_resource(Api(
         "SomeApi",
         StageName='test',
         DefinitionBody=self.swagger,
     ))
     template.to_json()
Beispiel #15
0
def template():
    """Assemble a Template from the module-level ``parameters``,
    ``conditions`` and ``resources`` collections and return it."""
    t = Template()
    for param in parameters.values():
        t.add_parameter(param)
    # Iterate key/value pairs directly instead of re-indexing the dict.
    for name, condition in conditions.items():
        t.add_condition(name, condition)
    for resource in resources.values():
        t.add_resource(resource)
    return t
Beispiel #16
0
 def test_yaml_long_form(self):
     """to_yaml flag combinations must yield the expected documents."""
     template = Template()
     template.add_resource(WaitCondition(
         "MyWaitCondition", Timeout=30, Handle=Sub(cond_string)))
     # Default rendering, then long-form, then clean-up variants —
     # both by keyword and by positional argument.
     self.assertEqual(cond_normal, template.to_yaml())
     self.assertEqual(cond_long, template.to_yaml(long_form=True))
     self.assertEqual(cond_long, template.to_yaml(False, True))
     self.assertEqual(cond_clean, template.to_yaml(clean_up=True))
     self.assertEqual(cond_clean, template.to_yaml(True))
Beispiel #17
0
    def test_ne(self):
        """Two templates with different contents must compare unequal."""
        first = Template(Description='foo1', Metadata='bar1')
        first.add_resource(Bucket('Baz1'))
        first.add_output(Output('qux1', Value='qux1'))

        second = Template(Description='foo2', Metadata='bar2')
        second.add_resource(Bucket('Baz2'))
        second.add_output(Output('qux2', Value='qux2'))

        self.assertNotEqual(first, second)
Beispiel #18
0
 def test_required_function(self):
     """A Function with all required serverless properties serializes."""
     func = Function(
         "SomeHandler",
         Handler="index.handler",
         Runtime="nodejs",
         CodeUri="s3://bucket/handler.zip",
     )
     template = Template()
     template.add_resource(func)
     template.to_json()
Beispiel #19
0
 def test_required(self):
     """A Stack with all required properties must serialize cleanly."""
     opsworks_stack = Stack(
         "mystack",
         DefaultInstanceProfileArn="instancearn",
         Name="myopsworksname",
         ServiceRoleArn="arn",
     )
     template = Template()
     template.add_resource(opsworks_stack)
     template.to_json()
def main():
    """Print a CloudFormation template containing a VPC, public and
    private route tables, and one public plus one private subnet per
    availability zone."""
    template = Template()
    azs = ['ap-southeast-2a', 'ap-southeast-2b', 'ap-southeast-2c']

    private_subnets = []
    public_subnets = []

    vpc = Ref(template.add_resource(ec2.VPC('MyVPC',
                                            CidrBlock='10.0.0.0/16')))
    public_route_table = template.add_resource(ec2.RouteTable('MyUnitPublicRouteTable',
                                                              VpcId=vpc))
    private_route_table = template.add_resource(ec2.RouteTable('MyUnitPrivateRouteTable',
                                                               VpcId=vpc))

    # One public subnet per AZ: 10.0.1.0/24 .. 10.0.3.0/24.
    # (Replaces six copy-pasted Subnet(...) calls with two loops.)
    for octet, az in enumerate(azs, start=1):
        public_subnets.append(Subnet(template=template,
                                     route_table=public_route_table,
                                     az=az,
                                     cidr='10.0.%d.0/24' % octet,
                                     vpc=vpc,
                                     is_public=True))
    # One private subnet per AZ: 10.0.101.0/24 .. 10.0.103.0/24.
    for octet, az in enumerate(azs, start=101):
        private_subnets.append(Subnet(template=template,
                                      route_table=private_route_table,
                                      az=az,
                                      cidr='10.0.%d.0/24' % octet,
                                      vpc=vpc,
                                      is_public=False))

    print(template.to_json(indent=2, separators=(',', ': ')))
Beispiel #21
0
 def test_optional_auto_publish_alias(self):
     """AutoPublishAlias is optional and must serialize when given."""
     func = Function(
         "SomeHandler",
         Handler="index.handler",
         Runtime="nodejs",
         CodeUri="s3://bucket/handler.zip",
         AutoPublishAlias="alias",
     )
     template = Template()
     template.add_resource(func)
     template.to_json()
Beispiel #22
0
 def test_required_api_both(self):
     """Supplying DefinitionUri and DefinitionBody together must raise."""
     template = Template()
     template.add_resource(Api(
         "SomeApi",
         StageName='test',
         DefinitionUri='s3://bucket/swagger.yml',
         DefinitionBody=self.swagger,
     ))
     with self.assertRaises(ValueError):
         template.to_json()
Beispiel #23
0
 def test_valid_data(self):
     """A ContainerDefinition built from a dict keeps its data and can
     be embedded in a serializable TaskDefinition."""
     t = Template()
     cd = ecs.ContainerDefinition.from_dict("mycontainer", self.d)
     # FIX: `assertEquals` is a deprecated alias removed in Python 3.12.
     self.assertEqual(cd.Links[0], "containerA")
     td = ecs.TaskDefinition(
             "taskdef",
             ContainerDefinitions=[cd],
             Volumes=[ecs.Volume(Name="myvol")],
             TaskRoleArn=Ref(iam.Role("myecsrole"))
     )
     t.add_resource(td)
     t.to_json()
Beispiel #24
0
 def test_exclusive(self):
     """An S3-sourced Lambda Function definition must serialize."""
     template = Template()
     template.add_resource(Function(
         "AMIIDLookup",
         Handler="index.handler",
         Role=GetAtt("LambdaExecutionRole", "Arn"),
         Code=Code(S3Bucket="lambda-functions", S3Key="amilookup.zip"),
         Runtime="nodejs",
         Timeout="25",
     ))
     template.to_json()
Beispiel #25
0
 def test_s3_location(self):
     """CodeUri may be an S3Location object rather than a string."""
     location = S3Location(Bucket="mybucket", Key="mykey")
     func = Function(
         "SomeHandler",
         Handler="index.handler",
         Runtime="nodejs",
         CodeUri=location,
     )
     template = Template()
     template.add_resource(func)
     template.to_json()
Beispiel #26
0
 def test_optional_deployment_preference(self):
     """DeploymentPreference is optional and must serialize when set."""
     preference = DeploymentPreference(Type="AllAtOnce")
     func = Function(
         "SomeHandler",
         Handler="index.handler",
         Runtime="nodejs",
         CodeUri="s3://bucket/handler.zip",
         AutoPublishAlias="alias",
         DeploymentPreference=preference,
     )
     template = Template()
     template.add_resource(func)
     template.to_json()
Beispiel #27
0
 def test_tags(self):
     """Function Tags supplied as a Tags mapping must serialize."""
     func = Function(
         "SomeHandler",
         Handler="index.handler",
         Runtime="nodejs",
         CodeUri="s3://bucket/handler.zip",
         Tags=Tags({
             'Tag1': 'TagValue1',
             'Tag2': 'TagValue2',
         }),
     )
     template = Template()
     template.add_resource(func)
     template.to_json()
Beispiel #28
0
def generate_env_template(app_env, env_dict):
    """Render the environment template JSON containing a single tagged
    security group in the (currently hard-coded) VPC.

    :param app_env: environment name used in the template description.
    :param env_dict: dict supplying ``sg_name`` for the Name tag.
    :return: template JSON string.
    """
    sg_name = env_dict['sg_name']
    vpc_id = 'vpc-a1d187c4'  # query for this!
    logger.debug('generating template for %s' % vpc_id)

    template = Template()
    template.add_version('2010-09-09')
    template.add_description('env template for %s' % app_env)

    security_group = SecurityGroup('TestAppSecurityGroup')
    security_group.VpcId = vpc_id
    security_group.GroupDescription = 'testing'
    security_group.Tags = name_tag(sg_name)
    template.add_resource(security_group)
    return template.to_json()
def setup_resources():
    """ Sets Up stack resources
    """
    # NOTE: this helper deliberately publishes everything through
    # module-level globals so sibling setup/test functions can share state.
    global vpc, template, public_route_table, private_route_table, az, public_subnets, private_subnets
    template = Template()
    private_subnets = []
    public_subnets = []
    # One VPC with a public and a private route table attached to it.
    vpc = template.add_resource(ec2.VPC('MyVPC',
                                        CidrBlock='10.0.0.0/16'))
    public_route_table = template.add_resource(ec2.RouteTable('MyUnitPublicRouteTable',
                                                              VpcId=Ref(vpc)))
    private_route_table = template.add_resource(ec2.RouteTable('MyUnitPrivateRouteTable',
                                                               VpcId=Ref(vpc)))
    # Availability zones the subnets will later be spread across.
    az = ['ap-southeast-2a', 'ap-southeast-2b', 'ap-southeast-2c']
Beispiel #30
0
 def test_DLQ(self):
     """A Function with an SNS dead-letter queue must serialize."""
     dlq = DeadLetterQueue(
         Type='SNS',
         TargetArn='arn:aws:sns:us-east-1:000000000000:SampleTopic',
     )
     func = Function(
         "SomeHandler",
         Handler="index.handler",
         Runtime="nodejs",
         CodeUri="s3://bucket/handler.zip",
         DeadLetterQueue=dlq,
     )
     template = Template()
     template.add_resource(func)
     template.to_json()
from troposphere import Ref, Template, Parameter, Output, Join, GetAtt
import troposphere.ec2 as ec2
t = Template()
#SecurityGroup
#AMIID and instanceID
#SSH key pair

# BUG FIX: the class is `ec2.SecurityGroup` (capital S) —
# `ec2.securityGroup` raises AttributeError.
sg = ec2.SecurityGroup("Lampsg")
sg.GroupDescription = "Allow access through ports 80 and 22 to the web server"
sg.SecurityGroupIngress = [
    ec2.SecurityGroupRule(IpProtocol="tcp",
                          FromPort="22",
                          ToPort="22",
                          CidrIp="0.0.0.0/0"),
    ec2.SecurityGroupRule(IpProtocol="tcp",
                          FromPort="80",
                          ToPort="80",
                          # BUG FIX: "0,0,0,/0" is not a valid CIDR; open
                          # HTTP to the world, matching the port-22 rule.
                          CidrIp="0.0.0.0/0"),
]

t.add_resource(sg)

print(t.to_json())
Beispiel #32
0
    "IsMultiNodeCluster": Equals(
        Ref("ClusterType"),
        "multi-node"
    ),
}

for k in conditions:
    t.add_condition(k, conditions[k])

redshiftcluster = t.add_resource(Cluster(
    "RedshiftCluster",
    ClusterType=Ref("ClusterType"),
    NumberOfNodes=If("IsMultiNodeCluster",
                     Ref("NumberOfNodes"), Ref("AWS::NoValue")),
    NodeType=Ref("NodeType"),
    DBName=Ref("DatabaseName"),
    MasterUsername=Ref("MasterUsername"),
    MasterUserPassword=Ref("MasterUserPassword"),
    ClusterParameterGroupName=Ref("RedshiftClusterParameterGroup"),
    VpcSecurityGroupIds=Ref("SecurityGroup"),
    ClusterSubnetGroupName=Ref("RedshiftClusterSubnetGroup"),
))

amazonredshiftparameter1 = AmazonRedshiftParameter(
    "AmazonRedshiftParameter1",
    ParameterName="enable_user_activity_logging",
    ParameterValue="true",
)

redshiftclusterparametergroup = t.add_resource(ClusterParameterGroup(
    "RedshiftClusterParameterGroup",
def main(**launch_parameters):
    """Render the CloudFormation (YAML) template for a SOCA DCV desktop node.

    :param launch_parameters: session/instance settings (AMI id, subnets,
        instance type, user data, tags, ...) — see the key accesses below.
    :return: dict with a ``success`` flag and either the rendered template
        or an error description under ``output``.
    """
    try:
        t = Template()
        t.set_version("2010-09-09")
        t.set_description("(SOCA) - Base template to deploy DCV nodes")
        allow_anonymous_data_collection = launch_parameters["DefaultMetricCollection"]
        # Launch Actual Capacity
        instance = ec2.Instance(str(launch_parameters["session_name"]))
        # Root volume: device name depends on the AMI flavour; always
        # encrypted gp2, 30 GB unless an explicit disk_size was supplied.
        instance.BlockDeviceMappings = [{'DeviceName': "/dev/xvda" if launch_parameters["base_os"] == "amazonlinux2" else "/dev/sda1",
                                         'Ebs': {
                                             'DeleteOnTermination': True,
                                             'VolumeSize': 30 if launch_parameters["disk_size"] is False else int(launch_parameters["disk_size"]),
                                             'VolumeType': 'gp2',
                                             'Encrypted': True}
                                         }]
        instance.ImageId = launch_parameters["image_id"]
        instance.SecurityGroupIds = [launch_parameters["security_group_id"]]
        if launch_parameters["hibernate"] is True:
            instance.HibernationOptions = ec2.HibernationOptions(Configured=True)
        instance.InstanceType = launch_parameters["instance_type"]
        # Pick a random private subnet when more than one is available.
        instance.SubnetId = random.choice(launch_parameters["soca_private_subnets"]) if len(launch_parameters["soca_private_subnets"]) > 1 else launch_parameters["soca_private_subnets"][0]
        # CloudFormation wants the profile name, not the full ARN.
        instance.IamInstanceProfile = launch_parameters["ComputeNodeInstanceProfileArn"].split("instance-profile/")[-1]
        instance.KeyName = launch_parameters["KeyName"]
        instance.UserData = Base64(Sub((launch_parameters["user_data"])))
        # "_soca_" tag prefix is rewritten to "soca:" just before returning.
        instance.Tags = base_Tags(
            Name=str(launch_parameters["cluster_id"] + "-" + launch_parameters["session_name"] + "-" + launch_parameters["user"]),
            _soca_JobName=str(launch_parameters["session_name"]),
            _soca_JobOwner=str(launch_parameters["user"]),
            _soca_NodeType="dcv",
            _soca_JobProject="desktop",
            _soca_DCVSupportHibernate=str(launch_parameters["hibernate"]).lower(),
            _soca_ClusterId=str(launch_parameters["cluster_id"]),
            _soca_DCVSessionUUID=str(launch_parameters["session_uuid"]),
            _soca_DCVSystem=str(launch_parameters["base_os"]))
        t.add_resource(instance)

        # Begin Custom Resource
        # Change Mapping to No if you want to disable this
        if allow_anonymous_data_collection is True:
            metrics = CustomResourceSendAnonymousMetrics("SendAnonymousData")
            metrics.ServiceToken = launch_parameters["SolutionMetricLambda"]
            metrics.DesiredCapacity = "1"
            metrics.InstanceType = str(launch_parameters["instance_type"])
            metrics.Efa = "false"
            metrics.ScratchSize = "0"
            metrics.RootSize = str(launch_parameters["disk_size"])
            metrics.SpotPrice = "false"
            metrics.BaseOS = str(launch_parameters["base_os"])
            metrics.StackUUID = str(launch_parameters["session_uuid"])
            metrics.KeepForever = "false"
            metrics.FsxLustre = str({"fsx_lustre": "false", "existing_fsx": "false", "s3_backend": "false", "import_path": "false", "export_path": "false",
                                     "deployment_type": "false", "per_unit_throughput": "false", "capacity": 1200})
            metrics.TerminateWhenIdle = "false"
            metrics.Dcv = "true"
            t.add_resource(metrics)
        # End Custom Resource

        # Tags must use "soca:<Key>" syntax
        template_output = t.to_yaml().replace("_soca_", "soca:")
        return {'success': True,
                'output': template_output}

    except Exception as e:
        # Best-effort error report: include exception type, file and line.
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        return {'success': False,
                'output': 'cloudformation_builder.py: ' + (str(e) + ': error :' + str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))}
Beispiel #34
0
#!/usr/bin/env python

from troposphere import Output, Template, Ref
from troposphere.s3 import Bucket, Private

t = Template()

# Private S3 bucket exposed through a template output.
s3bucket = t.add_resource(Bucket("code", AccessControl=Private))

t.add_output([
    Output("BucketName",
           Value=Ref(s3bucket),
           Description="ID of Bucket without any DNS")
])

if __name__ == '__main__':
    # BUG FIX: `print` is a function in Python 3; the statement form
    # (`print t.to_json()`) is a SyntaxError there.
    print(t.to_json())
            't2.micro',
            't2.small',
            't2.medium',
            't2.large',
        ],
        ConstraintDescription='must be a valid EC2 T2 instance type.',
    ))

# IAM policy attached to the instance role, granting write access to
# CloudWatch metrics, logs and events so the host can ship monitoring data.
t.add_resource(
    IAMPolicy("MonitoringPolicy",
              PolicyName="AllowSendingDataForMonitoring",
              PolicyDocument=Policy(Statement=[
                  Statement(Effect=Allow,
                            Action=[
                                Action("cloudwatch", "Put*"),
                                Action("logs", "Create*"),
                                Action("logs", "Put*"),
                                Action("logs", "Describe*"),
                                Action("events", "Put*"),
                            ],
                            Resource=["*"])
              ]),
              Roles=[Ref("Role")]))

t.add_resource(
    ec2.SecurityGroup(
        "SecurityGroup",
        GroupDescription="Allow SSH and TCP/{} access".format(ApplicationPort),
        SecurityGroupIngress=[
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
Beispiel #36
0
VpcSubnets = ["subnet-c78a4fae", "subnet-c64b4e8c"]
myvpc = "vpc-f8d62a91"

template = Template()

template.add_version('2010-09-09')

template.add_description("AWS CloudFormation Template Demo")

# Security group for the load balancer: HTTP only, restricted to a
# single whitelisted address.
lbsg = template.add_resource(
    SecurityGroup("lbsg",
                  # FIX: use the `myvpc` constant declared above instead of
                  # duplicating the VPC id literal.
                  VpcId=myvpc,
                  GroupDescription='SG For ALB',
                  SecurityGroupIngress=[
                      SecurityGroupRule(
                          IpProtocol='tcp',
                          FromPort='80',
                          ToPort='80',
                          CidrIp='52.56.182.198/32',
                      ),
                  ]))

instancesg = template.add_resource(
    SecurityGroup("instancesg",
                  VpcId='vpc-f8d62a91',
                  GroupDescription='SG For ASG instances',
                  SecurityGroupIngress=[
                      SecurityGroupRule(
                          IpProtocol='tcp',
                          FromPort='80',
                          ToPort='80',
Beispiel #37
0
def GenerateDockerRegistryLayer():
    """Build the CloudFormation template for the Docker registry layer.

    Declares the stack parameters (VPC, subnets, keypair, AMI, IAM role,
    S3 bucket), then a security group, an Elastic IP, a network interface
    and the registry EC2 instance wired together.

    :return: the assembled troposphere ``Template``.
    """
    t = Template()

    t.add_description("""\
    DockerRegistry Layer
    """)

    # ---- Stack parameters -------------------------------------------------
    stackname_param = t.add_parameter(
        Parameter(
            "StackName",
            Description="Environment Name (default: test)",
            Type="String",
            Default="test",
        ))

    vpcid_param = t.add_parameter(
        Parameter(
            "VpcId",
            Type="String",
            Description="VpcId of your existing Virtual Private Cloud (VPC)",
            Default="vpc-fab00e9f"))

    subnets = t.add_parameter(
        Parameter(
            "Subnets",
            Type="CommaDelimitedList",
            Description=(
                "The list SubnetIds, for public subnets in the "
                "region and in your Virtual Private Cloud (VPC) - minimum one"
            ),
            Default="subnet-b68f3bef,subnet-9a6208ff,subnet-bfdd4fc8"))

    keypair_param = t.add_parameter(
        Parameter("KeyPair",
                  Description="Name of an existing EC2 KeyPair to enable SSH "
                  "access to the instance",
                  Type="String",
                  Default="glueteam"))

    registry_ami_id_param = t.add_parameter(
        Parameter("RegistryAmiId",
                  Description="Registry server AMI ID",
                  Type="String",
                  Default="ami-a10897d6"))

    iam_role_param = t.add_parameter(
        Parameter(
            "IamRole",
            Description="IAM Role name",
            Type="String",
        ))

    s3bucket_param = t.add_parameter(
        Parameter(
            "BucketName",
            Description="S3 Bucket Name (default: )",
            Type="String",
            Default="",
        ))

    # --------- Docker registry

    # SSH (22) and HTTP (80) open to the world for the registry host.
    registry_sg = t.add_resource(
        ec2.SecurityGroup(
            'RegistrySG',
            GroupDescription='Security group for Registry host',
            VpcId=Ref(vpcid_param),
            Tags=Tags(Name=Join("", [Ref(stackname_param), "RegistrySG"])),
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="22",
                    ToPort="22",
                    CidrIp="0.0.0.0/0",
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="80",
                    ToPort="80",
                    CidrIp="0.0.0.0/0",
                ),
            ]))

    # Elastic IP associated with the registry interface further below
    # (referenced by resource name via GetAtt, not via this variable).
    registry_eip = t.add_resource(ec2.EIP(
        'RegistryEIP',
        Domain='vpc',
    ))

    # Primary interface, placed in the first configured subnet.
    registry_eth0 = t.add_resource(
        ec2.NetworkInterface(
            "RegistryEth0",
            Description=Join("", [Ref(stackname_param), "Registry Eth0"]),
            GroupSet=[
                Ref(registry_sg),
            ],
            SourceDestCheck=True,
            SubnetId=Select(0, Ref(subnets)),
            Tags=Tags(
                Name=Join("", [Ref(stackname_param), "Registry Interface 0"]),
                Interface="eth0",
            )))

    # Registry EC2 instance; user data pre-creates the docker build dirs.
    registry_host = t.add_resource(
        ec2.Instance(
            'RegistryHost',
            ImageId=Ref(registry_ami_id_param),
            InstanceType='t2.micro',
            KeyName=Ref(keypair_param),
            IamInstanceProfile=Ref(iam_role_param),
            NetworkInterfaces=[
                ec2.NetworkInterfaceProperty(
                    NetworkInterfaceId=Ref(registry_eth0),
                    DeviceIndex="0",
                ),
            ],
            Tags=Tags(Name=Join("", [Ref(stackname_param), "Registry"]),
                      Id=Join("", [Ref(stackname_param), "Registry"])),
            UserData=Base64(
                Join('', [
                    '#!/bin/bash\n',
                    'yum update -y aws-cfn-bootstrap\n',
                    'mkdir -p /root/build/redis /root/build/registry\n',
                    'touch /root/build/redis/Dockerfile\n',
                    'touch /root/build/redis/redis.conf\n',
                    'touch /root/build/registry/Dockerfile\n',
                ])),
        ))

    # Bind the EIP to the interface; resources are referenced by their
    # template names ("RegistryEIP"/"RegistryEth0") through GetAtt.
    registry_eip_assoc = t.add_resource(
        ec2.EIPAssociation(
            "RegistryEIPAssoc",
            NetworkInterfaceId=Ref(registry_eth0),
            AllocationId=GetAtt("RegistryEIP", "AllocationId"),
            PrivateIpAddress=GetAtt("RegistryEth0", "PrimaryPrivateIpAddress"),
        ))

    return t
Beispiel #38
0
 def test_badrequired(self):
     """Serializing an Instance missing required properties must raise."""
     with self.assertRaises(ValueError):
         template = Template()
         template.add_resource(Instance('ec2instance'))
         template.to_json()
Beispiel #39
0
 def test_resource(self):
     """Adding the same resource object twice must raise ValueError."""
     template = Template()
     fake = FakeAWSObject('fake', callcorrect=True)
     template.add_resource(fake)
     with self.assertRaises(ValueError):
         template.add_resource(fake)
Beispiel #40
0
 def test_ref(self):
     """add_resource must return the resource, keeping its name intact."""
     expected = 'fake'
     template = Template()
     added = template.add_resource(Instance(expected))
     self.assertEqual(added.name, expected)
Beispiel #41
0
 def test_required_title_error(self):
     """The missing-properties error message must mention "title:"."""
     # FIX: `assertRaisesRegexp` is a deprecated alias removed in
     # Python 3.12; `assertRaisesRegex` is the supported spelling.
     with self.assertRaisesRegex(ValueError, "title:"):
         t = Template()
         t.add_resource(Instance('ec2instance'))
         t.to_json()
Beispiel #42
0

template = Template()
template.add_version('2010-09-09')

# CodeBuild project: small Linux container with the OpenJDK 8 image,
# sources pulled from S3, no build artifacts produced.
build_environment = Environment(
    ComputeType='BUILD_GENERAL1_SMALL',
    Image='aws/codebuild/java:openjdk-8',
    Type='LINUX_CONTAINER',
    EnvironmentVariables=[{'Name': 'APP_NAME2', 'Value': 'demo2'}],
)

build_source = Source(
    Location='codebuild-demo-test/0123ab9a371ebf0187b0fe5614fbb72c',
    Type='S3',
)

template.add_resource(Project(
    "DemoProject",
    Artifacts=Artifacts(Type='NO_ARTIFACTS'),
    Environment=build_environment,
    Name='DemoProject',
    ServiceRole='arn:aws:iam::0123456789:role/codebuild-role',
    Source=build_source,
))

print(template.to_json())
Beispiel #43
0
from troposphere import Tags, Template
from troposphere.secretsmanager import GenerateSecretString, Secret

template = Template()
template.set_version("2010-09-09")

# Secret whose value is the username JSON skeleton with an auto-generated
# 30-character password inserted under the "password" key.
secret = template.add_resource(
    Secret(
        "MySecret",
        Name="MySecret",
        Description="This is an autogenerated secret",
        GenerateSecretString=GenerateSecretString(
            SecretStringTemplate='{"username":"******"}',
            GenerateStringKey="password",
            PasswordLength=30,
        ),
        Tags=Tags(Appname="AppA"),
    )
)

print(template.to_json())
from troposphere import Ref, Template, Parameter, Output, Join, GetAtt, Base64
import troposphere.ec2 as ec2

t = Template()

# Security group opening SSH (22) and the Jenkins web UI (8080) to the world.
sg = ec2.SecurityGroup(
    "JenkinsSg",
    GroupDescription="Allow access to ports 22 and 8080",
    SecurityGroupIngress=[
        ec2.SecurityGroupRule(IpProtocol="tcp", FromPort="22",
                              ToPort="22", CidrIp="0.0.0.0/0"),
        ec2.SecurityGroupRule(IpProtocol="tcp", FromPort="8080",
                              ToPort="8080", CidrIp="0.0.0.0/0"),
    ],
)
t.add_resource(sg)

# This is the keypair that CloudFormation will ask you about when launching the stack
keypair = t.add_parameter(Parameter(
    "KeyName",
    Description="Name of the SSH key pair that will be used to access the instance",
    Type="String",
))

# Jenkins host.  NOTE(review): "InstanceProfile" is referenced by name and
# presumably defined elsewhere in this template -- confirm it exists.
instance = ec2.Instance(
    "Jenkins",
    ImageId="ami-e689729e",
    InstanceType="t2.micro",
    SecurityGroups=[Ref(sg)],
    KeyName=Ref(keypair),
    IamInstanceProfile=Ref("InstanceProfile"),
)
t.add_resource(instance)
t.add_output(Output(
    "InstanceAccess",
Beispiel #45
0
        "KeyPair",
        Description="Name of an existing EC2 KeyPair to SSH",
        Type="AWS::EC2::KeyPair::KeyName",
        ConstraintDescription="must be the name of an existing EC2 KeyPair.",
    ))

# Ingress: SSH restricted to the admin CIDR, application port open to all.
ingress_rules = [
    ec2.SecurityGroupRule(
        IpProtocol="tcp",
        FromPort="22",
        ToPort="22",
        CidrIp=PublicCidrIp,
    ),
    ec2.SecurityGroupRule(
        IpProtocol="tcp",
        FromPort=ApplicationPort,
        ToPort=ApplicationPort,
        CidrIp="0.0.0.0/0",
    ),
]
t.add_resource(
    ec2.SecurityGroup(
        "SecurityGroup",
        GroupDescription="Allow SSH and TCP/{} access".format(ApplicationPort),
        SecurityGroupIngress=ingress_rules,
    ))

ud = Base64(
    Join('\n', [
        "#!/bin/bash", "sudo yum install --enablerepo=epel -y nodejs",
        "wget http://bit.ly/2vESNuc -O /home/ec2-user/helloworld.js",
        "wget http://bit.ly/2vVvT18 -O /etc/init/helloworld.conf",
Beispiel #46
0
 def test_mutualexclusion(self):
     """Mutually exclusive properties must fail at to_json() time."""
     tmpl = Template()
     tmpl.add_resource(
         FakeAWSObject('fake', callcorrect=True, singlelist=[10]))
     with self.assertRaises(ValueError):
         tmpl.to_json()
Beispiel #47
0
status_key = 'status/netkan.json'

if not ZONE_ID:
    print('Zone ID Required from EnvVar `CKAN_ZONEID`')
    sys.exit()

t = Template()

t.set_description("Generate NetKAN Infrastructure CF Template")


def _fifo_queue(title, name):
    # All NetKAN queues share the same FIFO / 20s long-poll configuration.
    return Queue(title,
                 QueueName=name,
                 ReceiveMessageWaitTimeSeconds=20,
                 FifoQueue=True)


# Inbound + Outbound SQS Queues
# Inbound: Scheduler Write, Inflation Read
# Outbound: Inflator Write, Indexer Read
inbound = t.add_resource(_fifo_queue("NetKANInbound", "Inbound.fifo"))
outbound = t.add_resource(_fifo_queue("NetKANOutbound", "Outbound.fifo"))
addqueue = t.add_resource(_fifo_queue("Adding", "Adding.fifo"))
mirrorqueue = t.add_resource(
    Queue("Mirroring",
          QueueName="Mirroring.fifo",
        },
        "sa-east-1": {
            "AMI": "ami-3e3be423"
        },
        "ap-southeast-1": {
            "AMI": "ami-74dda626"
        },
        "ap-northeast-1": {
            "AMI": "ami-dcfa4edd"
        }
    })

# EC2 instance whose AMI is resolved per-region from the "RegionMap" mapping.
# NOTE(review): UserData is the literal string "80" (base64-encoded) --
# presumably consumed by the AMI's boot scripts, e.g. as a port; confirm.
ec2_instance = template.add_resource(
    ec2.Instance("Ec2Instance",
                 ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
                 InstanceType="t1.micro",
                 KeyName=Ref(keyname_param),
                 SecurityGroups=["default"],
                 UserData=Base64("80")))

template.add_output([
    Output(
        "InstanceId",
        Description="InstanceId of the newly created EC2 instance",
        Value=Ref(ec2_instance),
    ),
    Output(
        "AZ",
        Description="Availability Zone of the newly created EC2 instance",
        Value=GetAtt(ec2_instance, "AvailabilityZone"),
    ),
from troposphere import Join, Ref, Template
from troposphere.codebuild import Artifacts, Environment, Project, Source
from troposphere.iam import Role

t = Template()

t.add_description("CodeBuild - Helloworld container")

# Service role assumed by CodeBuild, with broad managed policies for
# pipeline access, ECR pushes, S3 and CloudWatch Logs.
# NOTE(review): Policy, Statement, Allow, AssumeRole and Principal are not
# imported above -- they presumably come from awacs (awacs.aws / awacs.sts);
# confirm those imports exist in the full script.
t.add_resource(
    Role("ServiceRole",
         AssumeRolePolicyDocument=Policy(Statement=[
             Statement(Effect=Allow,
                       Action=[AssumeRole],
                       Principal=Principal("Service",
                                           ["codebuild.amazonaws.com"]))
         ]),
         Path="/",
         ManagedPolicyArns=[
             'arn:aws:iam::aws:policy/AWSCodePipelineReadOnlyAccess',
             'arn:aws:iam::aws:policy/AWSCodeBuildDeveloperAccess',
             'arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryPowerUser',
             'arn:aws:iam::aws:policy/AmazonS3FullAccess',
             'arn:aws:iam::aws:policy/CloudWatchLogsFullAccess'
         ]))

environment = Environment(
    ComputeType='BUILD_GENERAL1_SMALL',
    Image='aws/codebuild/docker:1.12.1',
    Type='LINUX_CONTAINER',
    EnvironmentVariables=[
        {
            'Name': 'REPOSITORY_NAME',
# DNS name of the S3 bucket that backs the streaming distribution.
s3dnsname = t.add_parameter(
    Parameter(
        "S3DNSName",
        Description="The DNS name of an existing S3 bucket to use as the "
        "Cloudfront distribution origin",
        Type="String",
    )
)

# CloudFront streaming distribution in front of the S3 origin; trusted
# signers are disabled, so content URLs do not require signing.
myDistribution = t.add_resource(
    StreamingDistribution(
        "myDistribution",
        StreamingDistributionConfig=StreamingDistributionConfig(
            Comment="Streaming distribution",
            Enabled=True,
            S3Origin=S3Origin(DomainName=Ref(s3dnsname)),
            TrustedSigners=TrustedSigners(
                Enabled=False,
            ),
        ),
    )
)

t.add_output(
    [
        Output("DistributionId", Value=Ref(myDistribution)),
        Output(
            "DistributionName",
            Value=Join("", ["http://", GetAtt(myDistribution, "DomainName")]),
        ),
    ]
Beispiel #51
0
#
# Resources
#

# Security group for database clients, created only when
# 'CreateSecurityGroupCondition' holds.  Each ingress rule is included
# only when its engine condition is true (5432 for Postgres, 3306 for
# MySQL); otherwise the entry collapses to AWS::NoValue and is dropped
# from the rendered template.
rds_sg = t.add_resource(ec2.SecurityGroup(
    'RdsSecurityGroup',
    Condition='CreateSecurityGroupCondition',
    VpcId=Ref(param_vpcid),
    GroupDescription='Enable local postgres access',
    SecurityGroupIngress=[
        If('PostgresCondition',
           ec2.SecurityGroupRule(
               IpProtocol='tcp',
               FromPort='5432',
               ToPort='5432',
               CidrIp=Ref(param_db_client_location),
           ),
           Ref(AWS_NO_VALUE)),
        If('MysqlCondition',
           ec2.SecurityGroupRule(
               IpProtocol='tcp',
               FromPort='3306',
               ToPort='3306',
               CidrIp=Ref(param_db_client_location),
           ),
           Ref(AWS_NO_VALUE)),
    ],
))

subnet_group = t.add_resource(rds.DBSubnetGroup(
    'DatabaseSubnetGroup',
    DBSubnetGroupDescription='RDS subnet group',
        "eu-west-3": {
            "AMI": "ami-55b40228",
            "MonitorAMI": "ami-fbb40286"
        },
    })

# Load-balancer security group: HTTP (80) and HTTPS (443) from the remote
# access CIDR.  Both rules are identical apart from the port, so they are
# generated from one comprehension.
LoadBalancerSecurityGroup = t.add_resource(
    ec2.SecurityGroup(
        "LoadBalancerSecurityGroup",
        GroupDescription=(
            "Enables remote access to port 80 and 443 "
            "for the StorReduce load balancer"),
        SecurityGroupIngress=[
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort=port,
                ToPort=port,
                CidrIp=Ref(RemoteAccessCIDRParam),
            )
            for port in ("80", "443")
        ],
        VpcId=Ref(VpcIdParam)))

MonitorSecurityGroup = t.add_resource(
    ec2.SecurityGroup(
        "MonitorSecurityGroup",
        GroupDescription=
        "Enables remote access to port 3000 and 5601 for StorReduce monitor",
Beispiel #53
0
# Tunable for the name-node garbage collector time ratio
# (presumably mapped to the JVM's -XX:GCTimeRatio -- confirm downstream).
gcTimeRatio = template.add_parameter(
    Parameter("GcTimeRatioValue",
              Description="Hadoop name node garbage collector time ratio",
              Type=NUMBER,
              Default="19"))

# IAM roles required by EMR

# Role assumed by the EMR service itself.
emr_service_role = template.add_resource(
    iam.Role(
        'EMRServiceRole',
        AssumeRolePolicyDocument={
            "Statement": [{
                "Effect": "Allow",
                "Principal": {
                    "Service": ["elasticmapreduce.amazonaws.com"]
                },
                "Action": ["sts:AssumeRole"]
            }]
        },
        ManagedPolicyArns=[
            'arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceRole'
        ]))

# Name of the pre-existing default EMR autoscaling role (referenced by
# name, not created here).
emr_autoscaling_role = "EMR_AutoScaling_DefaultRole"

emr_job_flow_role = template.add_resource(
    iam.Role(
        "EMRJobFlowRole",
        AssumeRolePolicyDocument={
            "Statement": [{
    "eu-central-1": {"AMI": "ami-e68f82fb"},
    "ap-northeast-1": {"AMI": "ami-b80b6db8"},
    "us-east-1": {"AMI": "ami-61bbf104"},
    "sa-east-1": {"AMI": "ami-fd0197e0"},
    "us-west-1": {"AMI": "ami-f77fbeb3"},
    "us-west-2": {"AMI": "ami-d440a6e7"}
})

# Region -> AMI ids for the Amazon Linux 2015 images.
t.add_mapping("AMAZONLINUX2015", {
    "eu-west-1": {"AMI": "ami-d1f482b1"},
    "us-east-1": {"AMI": "ami-8fcee4e5"},
    "us-west-2": {"AMI": "ami-63b25203"}
})


# Wait handle/condition pair for Ambari setup: stack creation blocks on the
# condition until the handle is signalled, for at most 3600 seconds.
waitHandleAmbari = t.add_resource(WaitConditionHandle("waitHandleAmbari"))

waitConditionAmbari = t.add_resource(
    WaitCondition(
        "waitConditionAmbari",
        Handle=Ref(waitHandleAmbari),
        Timeout="3600",
    )
)

## Functions to generate blockdevicemappings
##   count: the number of devices to map
##   devicenamebase: "/dev/sd" or "/dev/xvd"
##   volumesize: "100"
##   volumetype: "gp2"
def my_block_device_mappings_root(devicenamebase,volumesize,volumetype):
Beispiel #55
0
 def test_simple_table(self):
     """A serverless SimpleTable with only a title must serialize cleanly."""
     tmpl = Template()
     tmpl.add_resource(SimpleTable("SomeTable"))
     tmpl.to_json()
Beispiel #56
0
# IoT topic rule whose SQL filter selects high-temperature readings and
# fans them out to two actions: a Lambda invocation and an IoT Analytics
# channel.  RuleDisabled=True, so the rule is created in a disabled state.
topic_rule = TopicRule(
    "MyTopicRule",
    RuleName="NameParameter",
    TopicRulePayload=TopicRulePayload(
        RuleDisabled=True,
        Sql="SELECT temp FROM SomeTopic WHERE temp > 60",
        Actions=[
            Action(
                Lambda=LambdaAction(
                    FunctionArn="arn",
                ),
            ),
            Action(
                IotAnalytics=IotAnalyticsAction(
                    ChannelName="mychannel",
                    RoleArn="arn",
                ),
            ),
        ],
    ),
)

# Register all IoT resources (certificate/policy/thing objects are defined
# earlier in this script) and emit the rendered template.
t.add_resource(certificate)
t.add_resource(policy)
t.add_resource(policy_principal)
t.add_resource(thing)
t.add_resource(thing_principal)
t.add_resource(topic_rule)

print(t.to_json())
Beispiel #57
0
def generate_vpc_template(layers, az_count, cidr_block):
    """Build a three-layer (public / app / storage) VPC template.

    :param layers: dict with 'pub', 'app' and 'stor' keys, each a list of
        subnet CIDRs, one per availability zone.
    :param az_count: iterable of AZ indices (0 -> 'a', 1 -> 'b', ...) mapped
        to zone letters via the module-level ``alpha`` sequence.
    :param cidr_block: CIDR block of the VPC itself.
    :return: the populated troposphere ``Template``.
    """
    TPL = Template()
    TPL.set_description('VPC - Version 2019-06-05')
    TPL.set_metadata({'Author': 'https://github.com/johnpreston'})
    VPC = VPCType('VPC',
                  CidrBlock=cidr_block,
                  EnableDnsHostnames=True,
                  EnableDnsSupport=True,
                  Tags=Tags(Name=Ref('AWS::StackName'),
                            EnvironmentName=Ref('AWS::StackName')))
    IGW = InternetGateway("InternetGateway")
    IGW_ATTACH = VPCGatewayAttachment("VPCGatewayAttachement",
                                      InternetGatewayId=Ref(IGW),
                                      VpcId=Ref(VPC))
    # '${AWS::StackName}.local' is a CloudFormation Sub pattern, not Python
    # interpolation -- the former f-string prefixes here were no-ops.
    DHCP_OPTIONS = DHCPOptions('VpcDhcpOptions',
                               DomainName=Sub('${AWS::StackName}.local'),
                               DomainNameServers=['AmazonProvidedDNS'],
                               Tags=Tags(Name=Sub(f'DHCP-${{{VPC.title}}}')))
    DHCP_ATTACH = VPCDHCPOptionsAssociation('VpcDhcpOptionsAssociate',
                                            DhcpOptionsId=Ref(DHCP_OPTIONS),
                                            VpcId=Ref(VPC))
    # Private hosted zone '<stack>.local' bound to this VPC.
    DNS_HOSTED_ZONE = HostedZone(
        'VpcHostedZone',
        VPCs=[HostedZoneVPCs(VPCId=Ref(VPC), VPCRegion=Ref('AWS::Region'))],
        Name=Sub('${AWS::StackName}.local'),
        HostedZoneTags=Tags(Name=Sub(f'ZoneFor-${{{VPC.title}}}')))
    TPL.add_resource(VPC)
    TPL.add_resource(IGW)
    TPL.add_resource(IGW_ATTACH)
    TPL.add_resource(DHCP_OPTIONS)
    TPL.add_resource(DHCP_ATTACH)
    TPL.add_resource(DNS_HOSTED_ZONE)
    # Storage layer: one subnet per AZ sharing a single route table with no
    # default route (no internet access).
    STORAGE_RTB = TPL.add_resource(
        RouteTable('StorageRtb', VpcId=Ref(VPC), Tags=Tags(Name='StorageRtb')))
    STORAGE_SUBNETS = []
    for count, subnet_cidr in zip(az_count, layers['stor']):
        subnet = Subnet(
            f'StorageSubnet{alpha[count].upper()}',
            CidrBlock=subnet_cidr,
            VpcId=Ref(VPC),
            AvailabilityZone=Sub(f'${{AWS::Region}}{alpha[count]}'),
            Tags=Tags(Name=Sub(f'${{AWS::StackName}}-Storage-{alpha[count]}'),
                      Usage="Storage"))
        # Registered for its side effect only; the return value was
        # previously bound to an unused local.
        TPL.add_resource(
            SubnetRouteTableAssociation(
                f'StorageSubnetAssoc{alpha[count].upper()}',
                SubnetId=Ref(subnet),
                RouteTableId=Ref(STORAGE_RTB)))
        STORAGE_SUBNETS.append(subnet)
        TPL.add_resource(subnet)
    # Public layer: one subnet and one NAT gateway (with its EIP) per AZ,
    # all sharing a route table with a default route to the IGW.
    PUBLIC_RTB = TPL.add_resource(
        RouteTable('PublicRtb', VpcId=Ref(VPC), Tags=Tags(Name='PublicRtb')))
    TPL.add_resource(
        Route('PublicDefaultRoute',
              GatewayId=Ref(IGW),
              RouteTableId=Ref(PUBLIC_RTB),
              DestinationCidrBlock='0.0.0.0/0'))
    PUBLIC_SUBNETS = []
    NAT_GATEWAYS = []
    for count, subnet_cidr in zip(az_count, layers['pub']):
        subnet = Subnet(
            f'PublicSubnet{alpha[count].upper()}',
            CidrBlock=subnet_cidr,
            VpcId=Ref(VPC),
            AvailabilityZone=Sub(f'${{AWS::Region}}{alpha[count]}'),
            MapPublicIpOnLaunch=True,
            Tags=Tags(Name=Sub(f'${{AWS::StackName}}-Public-{alpha[count]}')))
        eip = TPL.add_resource(
            EIP(f"NatGatewayEip{alpha[count].upper()}", Domain='vpc'))
        nat = NatGateway(f"NatGatewayAz{alpha[count].upper()}",
                         AllocationId=GetAtt(eip, 'AllocationId'),
                         SubnetId=Ref(subnet))
        TPL.add_resource(
            SubnetRouteTableAssociation(
                f'PublicSubnetsRtbAssoc{alpha[count].upper()}',
                RouteTableId=Ref(PUBLIC_RTB),
                SubnetId=Ref(subnet)))
        NAT_GATEWAYS.append(nat)
        PUBLIC_SUBNETS.append(subnet)
        TPL.add_resource(nat)
        TPL.add_resource(subnet)
    # App layer: one subnet per AZ, each with its own route table whose
    # default route goes through that AZ's NAT gateway.
    APP_SUBNETS = []
    APP_RTBS = []
    for count, subnet_cidr, nat in zip(az_count, layers['app'], NAT_GATEWAYS):
        SUFFIX = alpha[count].upper()
        subnet = Subnet(
            f'AppSubnet{SUFFIX}',
            CidrBlock=subnet_cidr,
            VpcId=Ref(VPC),
            AvailabilityZone=Sub(f'${{AWS::Region}}{alpha[count]}'),
            Tags=Tags(Name=Sub(f'${{AWS::StackName}}-App-{alpha[count]}')))
        APP_SUBNETS.append(subnet)
        rtb = RouteTable(f'AppRtb{alpha[count].upper()}',
                         VpcId=Ref(VPC),
                         Tags=Tags(Name=f'AppRtb{alpha[count].upper()}'))
        APP_RTBS.append(rtb)
        route = Route(f'AppRoute{alpha[count].upper()}',
                      NatGatewayId=Ref(nat),
                      RouteTableId=Ref(rtb),
                      DestinationCidrBlock='0.0.0.0/0')
        subnet_assoc = SubnetRouteTableAssociation(
            f'SubnetRtbAssoc{alpha[count].upper()}',
            RouteTableId=Ref(rtb),
            SubnetId=Ref(subnet))
        TPL.add_resource(subnet)
        TPL.add_resource(rtb)
        TPL.add_resource(route)
        TPL.add_resource(subnet_assoc)

    # S3 gateway endpoints, one per layer's route table(s).
    APP_S3_ENDPOINT = VPCEndpoint(
        'AppS3Endpoint',
        VpcId=Ref(VPC),
        RouteTableIds=[Ref(rtb) for rtb in APP_RTBS],
        ServiceName=Sub('com.amazonaws.${AWS::Region}.s3'),
        VpcEndpointType='Gateway',
    )
    PUBLIC_S3_ENDPOINT = VPCEndpoint(
        'PublicS3Endpoint',
        VpcId=Ref(VPC),
        RouteTableIds=[Ref(PUBLIC_RTB)],
        ServiceName=Sub('com.amazonaws.${AWS::Region}.s3'),
        VpcEndpointType='Gateway',
    )
    STORAGE_S3_ENDPOINT = VPCEndpoint(
        'StorageS3Endpoint',
        VpcId=Ref(VPC),
        RouteTableIds=[Ref(STORAGE_RTB)],
        ServiceName=Sub('com.amazonaws.${AWS::Region}.s3'),
        VpcEndpointType='Gateway')
    # Spare EIPs, one per AZ; their allocation ids are exported below.
    RESOURCES = []
    for count in az_count:
        resource = TPL.add_resource(EIP(f'Eip{count}', Domain='vpc'))
        RESOURCES.append(resource)
    TPL.add_resource(APP_S3_ENDPOINT)
    TPL.add_resource(PUBLIC_S3_ENDPOINT)
    TPL.add_resource(STORAGE_S3_ENDPOINT)
    # Allow HTTPS from every app subnet to the interface endpoints.
    SG_RULES = []
    for subnet in layers['app']:
        RULE = SecurityGroupRule(
            IpProtocol="tcp",
            FromPort="443",
            ToPort="443",
            CidrIp=subnet,
        )
        SG_RULES.append(RULE)

    ENDPOINT_SG = TPL.add_resource(
        SecurityGroup(
            'VpcEndpointSecurityGroup',
            VpcId=Ref(VPC),
            GroupDescription='SG for all Interface VPC Endpoints',
            SecurityGroupIngress=SG_RULES,
            Tags=Tags(Name="sg-endpoints"),
        ))

    # Interface endpoints for the app subnets (SNS, SQS, ECR, Secrets
    # Manager, SSM), all with private DNS enabled.
    APP_SNS_ENDPOINT = VPCEndpoint(
        'AppSNSEndpoint',
        VpcId=Ref(VPC),
        SubnetIds=[Ref(subnet) for subnet in APP_SUBNETS],
        SecurityGroupIds=[GetAtt(ENDPOINT_SG, 'GroupId')],
        ServiceName=Sub('com.amazonaws.${AWS::Region}.sns'),
        VpcEndpointType='Interface',
        PrivateDnsEnabled=True)
    TPL.add_resource(APP_SNS_ENDPOINT)

    APP_SQS_ENDPOINT = VPCEndpoint(
        'AppSQSEndpoint',
        VpcId=Ref(VPC),
        SubnetIds=[Ref(subnet) for subnet in APP_SUBNETS],
        SecurityGroupIds=[GetAtt(ENDPOINT_SG, 'GroupId')],
        ServiceName=Sub('com.amazonaws.${AWS::Region}.sqs'),
        VpcEndpointType='Interface',
        PrivateDnsEnabled=True)
    TPL.add_resource(APP_SQS_ENDPOINT)

    APP_ECR_API_ENDPOINT = VPCEndpoint(
        'AppECRAPIEndpoint',
        VpcId=Ref(VPC),
        SubnetIds=[Ref(subnet) for subnet in APP_SUBNETS],
        SecurityGroupIds=[GetAtt(ENDPOINT_SG, 'GroupId')],
        ServiceName=Sub('com.amazonaws.${AWS::Region}.ecr.api'),
        VpcEndpointType='Interface',
        PrivateDnsEnabled=True)
    TPL.add_resource(APP_ECR_API_ENDPOINT)

    APP_ECR_DKR_ENDPOINT = VPCEndpoint(
        'AppECRDKREndpoint',
        VpcId=Ref(VPC),
        SubnetIds=[Ref(subnet) for subnet in APP_SUBNETS],
        SecurityGroupIds=[GetAtt(ENDPOINT_SG, 'GroupId')],
        ServiceName=Sub('com.amazonaws.${AWS::Region}.ecr.dkr'),
        VpcEndpointType='Interface',
        PrivateDnsEnabled=True)
    TPL.add_resource(APP_ECR_DKR_ENDPOINT)

    APP_SECRETS_MANAGER_ENDPOINT = VPCEndpoint(
        'AppSecretsManagerEndpoint',
        VpcId=Ref(VPC),
        SubnetIds=[Ref(subnet) for subnet in APP_SUBNETS],
        SecurityGroupIds=[GetAtt(ENDPOINT_SG, 'GroupId')],
        ServiceName=Sub('com.amazonaws.${AWS::Region}.secretsmanager'),
        VpcEndpointType='Interface',
        PrivateDnsEnabled=True)
    TPL.add_resource(APP_SECRETS_MANAGER_ENDPOINT)

    APP_SSM_ENDPOINT = VPCEndpoint(
        'AppSSMEndpoint',
        VpcId=Ref(VPC),
        SubnetIds=[Ref(subnet) for subnet in APP_SUBNETS],
        SecurityGroupIds=[GetAtt(ENDPOINT_SG, 'GroupId')],
        ServiceName=Sub('com.amazonaws.${AWS::Region}.ssm'),
        VpcEndpointType='Interface',
        PrivateDnsEnabled=True)
    TPL.add_resource(APP_SSM_ENDPOINT)

    APP_SSM_MESSAGES_ENDPOINT = VPCEndpoint(
        'AppSSMMessagesEndpoint',
        VpcId=Ref(VPC),
        SubnetIds=[Ref(subnet) for subnet in APP_SUBNETS],
        SecurityGroupIds=[GetAtt(ENDPOINT_SG, 'GroupId')],
        ServiceName=Sub('com.amazonaws.${AWS::Region}.ssmmessages'),
        VpcEndpointType='Interface',
        PrivateDnsEnabled=True)
    TPL.add_resource(APP_SSM_MESSAGES_ENDPOINT)

    ################################################################################
    #
    # OUTPUTS
    #
    TPL.add_output(object_outputs(VPC, name_is_id=True))
    TPL.add_output(object_outputs(APP_SQS_ENDPOINT, name_is_id=True))
    TPL.add_output(object_outputs(APP_SNS_ENDPOINT, name_is_id=True))
    TPL.add_output(
        comments_outputs([{
            'EIP':
            Join(',',
                 [GetAtt(resource, "AllocationId") for resource in RESOURCES])
        }, {
            'PublicSubnets':
            Join(',', [Ref(subnet) for subnet in PUBLIC_SUBNETS])
        }, {
            'StorageSubnets':
            Join(',', [Ref(subnet) for subnet in STORAGE_SUBNETS])
        }, {
            'ApplicationSubnets':
            Join(',', [Ref(subnet) for subnet in APP_SUBNETS])
        }, {
            'StackName': Ref('AWS::StackName')
        }, {
            'VpcZoneId': Ref(DNS_HOSTED_ZONE)
        }]))
    return TPL
Beispiel #58
0
                S3DestinationConfiguration=S3DestinationConfiguration(
                    Bucket="testbucket",
                    Key="testkey",
                    RoleArn="arn",
                ),
            ),
            EntryName="entryname",
        )
    ],
    DatasetName="testdataset",
    RetentionPeriod=RetentionPeriod(
        Unlimited=True,
    ),
    Tags=Tags(Manufacturer="AmazonWebServices"),
    Triggers=[
        Trigger(
            TriggeringDataset=TriggeringDataset(DatasetName="testdataset"),
        ),
    ],
    VersioningConfiguration=VersioningConfiguration(
        Unlimited=True,
    ),
)

# Register the IoT Analytics resources (defined earlier in this script)
# and emit the rendered template.
t.add_resource(channel)
t.add_resource(pipeline)
t.add_resource(datastore)
t.add_resource(dataset)

print(t.to_json())
Beispiel #59
0
    "**WARNING** This template creates an Amazon EC2 instance. "
    "You will be billed for the AWS resources used if you create "
    "a stack from this template.")

# DNS name of the S3 bucket used as the CloudFront origin.
# Fixed logical-ID typo: "S3DNSNAme" -> "S3DNSName" (matches the sibling
# streaming-distribution template).  NOTE: this renames the CloudFormation
# parameter as seen by stack operators.
s3dnsname = t.add_parameter(Parameter(
    "S3DNSName",
    Description="The DNS name of an existing S3 bucket to use as the "
                "Cloudfront distribution origin",
    Type="String",
))

# Web distribution with a single S3 origin and a permissive default cache
# behavior (viewers may use HTTP or HTTPS).
myDistribution = t.add_resource(Distribution(
    "myDistribution",
    DistributionConfig=DistributionConfig(
        Origins=[Origin(Id="Origin 1", DomainName=Ref(s3dnsname))],
        DefaultCacheBehavior=DefaultCacheBehavior(
            TargetOriginId="Origin 1",
            ViewerProtocolPolicy="allow-all"),
        Enabled=True
    )
))

t.add_output([
    Output("DistributionId", Value=Ref(myDistribution)),
    Output(
        "DistributionName",
        Value=Join("", ["http://", GetAtt(myDistribution, "DomainName")])),
])

print(t.to_json())
Beispiel #60
0
from troposphere import Template, Join, Base64
import troposphere.ec2 as ec2

t = Template()

# Minimal instance with an inline shell script passed as user data.
user_data = Base64(Join('', ['#!/bin/bash\n', 'echo "Hello"\n']))
instance = ec2.Instance(
    'Ec2Instance',
    ImageId='ami-16fd7026',
    InstanceType='t3.nano',
    KeyName='mykey',
    UserData=user_data,
)
t.add_resource(instance)

print(t.to_yaml())