Example #1
    def test_join(self):
        delimiter = ','
        source_string = (
                '{ [ "arn:aws:lambda:",{ "Ref": "AWS::Region" },":",'
                '{ "Ref": "AWS::AccountId" },'
                '":function:cfnRedisEndpointLookup" ] }'
        )
        raw = Join(delimiter, source_string)
        actual = raw.to_dict()
        expected = (
            {'Fn::Join': [',', '{ [ "arn:aws:lambda:",{ "Ref": '
                          '"AWS::Region" },":",{ "Ref": "AWS::AccountId" },'
                          '":function:cfnRedisEndpointLookup" ] }']}

        )
        self.assertEqual(expected, actual)

        with self.assertRaises(ValueError):
            Join(10, "foobar")
Example #2
# Create the ECS Cluster
ECSCluster = t.add_resource(ecs.Cluster("ECSCluster", ClusterName="Fargate"))

# Create the VPC
VPC = t.add_resource(
    ec2.VPC("VPC",
            CidrBlock="10.0.0.0/16",
            EnableDnsSupport="true",
            EnableDnsHostnames="true"))

PubSubnetAz1 = t.add_resource(
    ec2.Subnet(
        "PubSubnetAz1",
        CidrBlock="10.0.0.0/24",
        VpcId=Ref(VPC),
        AvailabilityZone=Join("", [Ref('AWS::Region'), "a"]),
    ))

PubSubnetAz2 = t.add_resource(
    ec2.Subnet(
        "PubSubnetAz2",
        CidrBlock="10.0.1.0/24",
        VpcId=Ref(VPC),
        AvailabilityZone=Join("", [Ref('AWS::Region'), "b"]),
    ))

InternetGateway = t.add_resource(ec2.InternetGateway("InternetGateway", ))

AttachGateway = t.add_resource(
    ec2.VPCGatewayAttachment("AttachGateway",
                             VpcId=Ref(VPC),
                             InternetGatewayId=Ref(InternetGateway)))
Example #3
t.set_description(
    "Creates an AWS WAF configuration that protects against common attacks")

WebACLName = t.add_parameter(
    Parameter(
        "WebACLName",
        Default="CommonAttackProtection",
        Type="String",
        Description="Enter the name you want to use for the WebACL. "
        "This value is also added as a prefix for the names of the rules, "
        "conditions, and CloudWatch metrics created by this template.",
    ))

SqliMatchSet = t.add_resource(
    SqlInjectionMatchSet(
        "SqliMatchSet",
        Name=Join("", [Ref(WebACLName), "SqliMatch"]),
        SqlInjectionMatchTuples=[
            SqlInjectionMatchTuples(
                FieldToMatch=FieldToMatch(Type="QUERY_STRING"),
                TextTransformation="URL_DECODE"),
            SqlInjectionMatchTuples(
                FieldToMatch=FieldToMatch(Type="QUERY_STRING"),
                TextTransformation="HTML_ENTITY_DECODE"),
            SqlInjectionMatchTuples(FieldToMatch=FieldToMatch(Type="BODY"),
                                    TextTransformation="URL_DECODE"),
            SqlInjectionMatchTuples(FieldToMatch=FieldToMatch(Type="BODY"),
                                    TextTransformation="HTML_ENTITY_DECODE"),
            SqlInjectionMatchTuples(FieldToMatch=FieldToMatch(Type="URI"),
                                    TextTransformation="URL_DECODE")
        ]))
Example #4
            IpProtocol="tcp",
            FromPort=ApplicationPort,
            ToPort=ApplicationPort,
            CidrIp="0.0.0.0/0",
        ),
    ],
))

ud = Base64(Join('\n', [
    "#!/bin/bash",
    "sudo yum -y install git",
    "git clone https://github.com/du6/talkmeup.git",
    "sudo yum -y install python35",
    "sudo yum -y install python35-setuptools",
    "sudo easy_install-3.5 pip",
    "pip3 install --upgrade pip",
    "sudo python3 -m pip install virtualenv",
    "virtualenv -p python3 talkmeup-env",
    "source talkmeup-env/bin/activate",
    "cd talkmeup",
    "pip install -r requirements.txt",
    "python manage.py migrate",
    "python manage.py runserver 0.0.0.0:" + ApplicationPort,
]))
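As a side note on the pattern above (a hedged sketch, not part of this template):
Base64(Join('\n', lines)) nests the two intrinsic functions, so CloudFormation
receives the whole list as a single newline-separated shell script. The rendered
property looks roughly like this:

from troposphere import Base64, Join

user_data = Base64(Join("\n", ["#!/bin/bash", "echo hello"]))
print(user_data.to_dict())
# Roughly: {'Fn::Base64': {'Fn::Join': ['\n', ['#!/bin/bash', 'echo hello']]}}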

t.add_resource(ec2.Instance(
    "instance",
    ImageId="ami-9e90a5fe",
    InstanceType="t2.micro",
    SecurityGroups=[Ref("SecurityGroup")],
    KeyName=Ref("KeyPair"),
    # UserData=ud, // Configured with ansible
Example #5
t.set_description(
    "AWS CloudFormation Sample Template Route53_CNAME: Sample template "
    "showing how to create an Amazon Route 53 CNAME record.  It assumes that "
    "you already  have a Hosted Zone registered with Amazon Route 53. "
    "**WARNING** This template creates an Amazon EC2 instance. "
    "You will be billed for the AWS resources used if you create "
    "a stack from this template.")

hostedzone = t.add_parameter(Parameter(
    "HostedZone",
    Description="The DNS name of an existing Amazon Route 53 hosted zone",
    Type="String",
))

myDNSRecord = t.add_resource(RecordSetType(
    "myDNSRecord",
    HostedZoneName=Join("", [Ref(hostedzone), "."]),
    Comment="CNAME redirect to aws.amazon.com.",
    Name=Join("", [Ref("AWS::StackName"), ".", Ref("AWS::Region"), ".",
              Ref(hostedzone), "."]),
    Type="CNAME",
    TTL="900",
    ResourceRecords=["aws.amazon.com"]
))


t.add_output(Output("DomainName", Value=Ref(myDNSRecord)))

print(t.to_json())
Example #6
def empire_policy(resources):
    p = Policy(Statement=[
        Statement(Effect=Allow,
                  Resource=[resources['CustomResourcesTopic']],
                  Action=[sns.Publish]),
        Statement(Effect=Allow,
                  Resource=[resources['CustomResourcesQueue']],
                  Action=[
                      sqs.ReceiveMessage, sqs.DeleteMessage,
                      sqs.ChangeMessageVisibility
                  ]),
        Statement(Effect=Allow,
                  Resource=[resources['TemplateBucket']],
                  Action=[
                      s3.PutObject, s3.PutObjectAcl, s3.PutObjectVersionAcl,
                      s3.GetObject, s3.GetObjectVersion, s3.GetObjectAcl,
                      s3.GetObjectVersionAcl
                  ]),
        Statement(Effect=Allow,
                  Resource=["*"],
                  Action=[
                      awslambda.CreateFunction, awslambda.DeleteFunction,
                      awslambda.UpdateFunctionCode,
                      awslambda.GetFunctionConfiguration,
                      awslambda.AddPermission, awslambda.RemovePermission
                  ]),
        Statement(Effect=Allow,
                  Resource=["*"],
                  Action=[
                      events.PutRule, events.DeleteRule, events.DescribeRule,
                      events.EnableRule, events.DisableRule, events.PutTargets,
                      events.RemoveTargets
                  ]),
        Statement(Effect=Allow,
                  Resource=[
                      Join('', [
                          'arn:aws:cloudformation:',
                          Ref('AWS::Region'), ':',
                          Ref('AWS::AccountId'), ':stack/',
                          resources['Environment'], '-*'
                      ])
                  ],
                  Action=[
                      cloudformation.CreateStack, cloudformation.UpdateStack,
                      cloudformation.DeleteStack, cloudformation.
                      ListStackResources, cloudformation.DescribeStackResource,
                      cloudformation.DescribeStacks
                  ]),
        Statement(Effect=Allow,
                  Resource=['*'],
                  Action=[cloudformation.ValidateTemplate]),
        Statement(Effect=Allow,
                  Resource=["*"],
                  Action=[
                      ecs.CreateService, ecs.DeleteService,
                      ecs.DeregisterTaskDefinition,
                      ecs.Action("Describe*"),
                      ecs.Action("List*"), ecs.RegisterTaskDefinition,
                      ecs.RunTask, ecs.StartTask, ecs.StopTask,
                      ecs.SubmitTaskStateChange, ecs.UpdateService
                  ]),
        Statement(
            Effect=Allow,
            # TODO: Limit to specific ELB?
            Resource=["*"],
            Action=[
                elb.Action("Describe*"),
                elb.AddTags,
                elb.CreateLoadBalancer,
                elb.DescribeTags,
                elb.DeleteLoadBalancer,
                elb.ConfigureHealthCheck,
                elb.ModifyLoadBalancerAttributes,
                elb.SetLoadBalancerListenerSSLCertificate,
                elb.SetLoadBalancerPoliciesOfListener,
                elb.Action("CreateTargetGroup"),
                elb.Action("CreateListener"),
                elb.Action("DeleteListener"),
                elb.Action("DeleteTargetGroup"),
                elb.Action("ModifyTargetGroup"),
                elb.Action("ModifyTargetGroupAttributes"),
            ]),
        Statement(Effect=Allow,
                  Resource=["*"],
                  Action=[ec2.DescribeSubnets, ec2.DescribeSecurityGroups]),
        Statement(Effect=Allow,
                  Action=[
                      iam.GetServerCertificate, iam.UploadServerCertificate,
                      iam.DeleteServerCertificate, iam.PassRole
                  ],
                  Resource=["*"]),
        Statement(
            Effect=Allow,
            Action=[
                Action("route53", "ListHostedZonesByName"),
                route53.ChangeResourceRecordSets,
                route53.ListHostedZones,
                route53.GetHostedZone,
                route53.GetChange,
            ],
            # TODO: Limit to specific zones
            Resource=["*"]),
        Statement(Effect=Allow,
                  Action=[
                      kinesis.DescribeStream,
                      Action(kinesis.prefix, "Get*"),
                      Action(kinesis.prefix, "List*"),
                      kinesis.PutRecord,
                  ],
                  Resource=["*"]),
    ])
    return p
Example #7
                               "Key": "Service",
                               "Value": "ServiceVPC"
                           }, {
                               "Key": "VPC",
                               "Value": env
                           }],
                           VpcId=Ref("VPC"))
template.add_resource(bastion_sg)

cloud_watch_alarm_topic = Topic(
    "CloudWatchAlarmTopic",
    TopicName="Api{e}-{e}-CloudWatchAlarms".format(e=env))
template.add_resource(cloud_watch_alarm_topic)

dhcp_options = DHCPOptions("DomainName",
                           DomainName=Join(
                               "", [Ref("AWS::Region"), ".compute.internal"]),
                           DomainNameServers=["AmazonProvidedDNS"],
                           Tags=[{
                               "Key": "Environment",
                               "Value": "Api{e}".format(e=env)
                           }, {
                               "Key":
                               "Name",
                               "Value":
                               "Api{e}-{e}-DhcpOptions".format(e=env)
                           }, {
                               "Key": "Owner",
                               "Value": "Foo industries"
                           }, {
                               "Key": "Service",
                               "Value": "ServiceVPC"
Example #8
    def add_cache_invalidation(self, stack: Stack) -> list[AWSObject]:
        """Return resources invalidating cache when objects are pushed to s3.

        A lambda is called at each s3 object update to invalidate the CloudFront
        cache for the updated object.
        """
        lambda_name = f"{self.name}-cache-invalidation-lambda"
        lambda_policy = ManagedPolicy(
            name=f"{lambda_name}-policy",
            description=f"managed policy used by {lambda_name}",
            path=f"/{stack.name}/",
            statements=[
                Allow(
                    action=["cloudfront:CreateInvalidation"],
                    resource=Join(
                        "",
                        [
                            "arn:aws:cloudfront::", AccountId,
                            ":distribution ", self.id
                        ],
                    ),
                )
            ],
        )
        lambda_role = Role(
            name=f"{lambda_name}-role",
            description=f"role assumed by {lambda_name}",
            path=f"/{stack.name}/",
            trust=Trust(services=["lambda"]),
            managed_policy_arns=[lambda_policy.arn],
        )

        # Get first part of invalidation lambda code from a file
        with open(
                os.path.join(
                    os.path.dirname(os.path.abspath(__file__)),
                    "data",
                    "lambda_invalidate_head.py",
                )) as lf:
            lambda_code = lf.read().splitlines()

        # Complete it with the part depending on the distribution id
        lambda_code.extend([
            "    client.create_invalidation(",
            Sub(
                "        DistributionId='${distribution_id}',",
                distribution_id=self.id,
            ),
            "        InvalidationBatch={",
            "            'Paths': {'Quantity': 1, 'Items': path},",
            "            'CallerReference': str(time.time()),",
            "        },",
            "    )",
        ])
        lambda_function = Function(
            name_to_id(lambda_name),
            description=(f"lambda invalidating cloudfront cache when "
                         f"{self.bucket.name} objects are updated"),
            handler="invalidate.handler",
            role=lambda_role,
            code_zipfile=Join("\n", lambda_code),
            runtime="python3.9",
        )

        sns_topic = Topic(name=f"{self.name}-invalidation-topic")
        sns_topic.add_lambda_subscription(
            function=lambda_function,
            delivery_policy={"throttlePolicy": {
                "maxReceivesPerSecond": 10
            }},
        )
        # Trigger the invalidation when a file is updated
        self.bucket.add_notification_configuration(event="s3:ObjectCreated:*",
                                                   target=sns_topic,
                                                   permission_suffix=self.name)

        result = [
            resource for construct in (lambda_policy, lambda_role,
                                       lambda_function, sns_topic)
            for resource in construct.resources(stack)
        ]
        return result
Example #9
environment = Environment(
    ComputeType='BUILD_GENERAL1_SMALL',
    Image='aws/codebuild/standard:2.0',
    Type='LINUX_CONTAINER',
    EnvironmentVariables=[
        {
            'Name': 'REPOSITORY_NAME',
            'Value': 'helloworld'
        },
        {
            'Name':
            'REPOSITORY_URI',
            'Value':
            Join("", [
                Ref("AWS::AccountId"), ".dkr.ecr.",
                Ref("AWS::Region"), ".amazonaws.com", "/", "helloworld"
            ])
        },
    ],
    PrivilegedMode=True,
)

buildspec = """version: 0.1
phases:
  pre_build:
    commands:
      - aws codepipeline get-pipeline-state --name "${CODEBUILD_INITIATOR##*/}" --query stageStates[?actionStates[0].latestExecution.externalExecutionId==\`$CODEBUILD_BUILD_ID\`].latestExecution.pipelineExecutionId --output=text > /tmp/execution_id.txt
      - aws codepipeline get-pipeline-execution --pipeline-name "${CODEBUILD_INITIATOR##*/}" --pipeline-execution-id $(cat /tmp/execution_id.txt) --query 'pipelineExecution.artifactRevisions[0].revisionId' --output=text > /tmp/tag.txt
      - printf "%s:%s" "$REPOSITORY_URI" "$(cat /tmp/tag.txt)" > /tmp/build_tag.txt
      - printf '{"tag":"%s"}' "$(cat /tmp/tag.txt)" > /tmp/build.json
      - $(aws ecr get-login --no-include-email)
Example #10
    def add_resources(self):
        """Add ASG to template."""
        template = self.template
        variables = self.get_variables()

        role_policy_statements = [
            Statement(Action=[awacs.aws.Action('elasticloadbalancing', '*')],
                      Effect=Allow,
                      Resource=['*']),
            Statement(Action=[
                awacs.ssm.GetParameter,
                awacs.ec2.DescribeInstances,
            ],
                      Effect=Allow,
                      Resource=["*"]),
        ]

        targetgrouparnsomitted = 'TargetGroupARNsOmitted'
        template.add_condition(
            targetgrouparnsomitted,
            Equals(Join('', variables['TargetGroupARNs'].ref), ''))

        # Resources
        server_role = template.add_resource(
            iam.Role(
                'ServerServerRole',
                AssumeRolePolicyDocument=Policy(
                    Version='2012-10-17',
                    Statement=[
                        Statement(Effect=Allow,
                                  Action=[awacs.sts.AssumeRole],
                                  Principal=Principal('Service',
                                                      ['ec2.amazonaws.com']))
                    ]),
                ManagedPolicyArns=variables['AppPolicies'].ref,
                Path='/',
                Policies=[
                    iam.Policy(PolicyName=Join('-', [
                        variables['Company'].ref, variables['Application'].ref,
                        'app-role', variables['Environment'].ref
                    ]),
                               PolicyDocument=Policy(
                                   Version='2012-10-17',
                                   Statement=role_policy_statements)),
                ]))

        server_profile = template.add_resource(
            iam.InstanceProfile('ServerInstanceProfile',
                                Path='/',
                                Roles=[Ref(server_role)]))

        server_launch_config = template.add_resource(
            autoscaling.LaunchConfiguration(
                'LaunchConfig',
                IamInstanceProfile=Ref(server_profile),
                ImageId=variables['AppAMI'].ref,
                InstanceType=variables['AppInstanceType'].ref,
                InstanceMonitoring=True,
                KeyName=variables['KeyName'].ref,
                SecurityGroups=variables['AppSecurityGroups'].ref,
                UserData=variables['UserData']))
        asg_tags = [
            autoscaling.Tag(
                'Name',
                Join('-', [
                    variables['Company'].ref, variables['Application'].ref,
                    variables['Role'].ref, variables['Environment'].ref
                ]), True),
            autoscaling.Tag('Application', variables['Application'].ref, True),
            autoscaling.Tag('AutoAlarmCreation', 'True', True),
            autoscaling.Tag('Company', variables['Company'].ref, True),
            autoscaling.Tag('Environment', variables['Environment'].ref, True),
            # autoscaling.Tag('TechOwner',
            #                 variables['TechOwner'].ref, True),
            autoscaling.Tag('TechOwnerEmail', variables['TechOwnerEmail'].ref,
                            True),
            autoscaling.Tag('Backup', variables['Backup'].ref, True),
            autoscaling.Tag('BackupHourly', variables['BackupHourly'].ref,
                            True),
            # autoscaling.Tag('DataClassification',
            #                 variables['DataClassification'].ref, True),
            autoscaling.Tag('StatelessHa', variables['StatelessHaEnabled'].ref,
                            True),
            autoscaling.Tag('MSBuildConfiguration',
                            variables['MSBuildConfiguration'].ref, True),
        ]
        optional_tags = ['Role', 'Service']
        for tag in optional_tags:
            if variables[tag].value != '':
                asg_tags.append(autoscaling.Tag(tag, variables[tag].ref, True))

        auto_deploy = variables['ASGAutoDeploy'].value
        if auto_deploy == 'true':
            update_policy = UpdatePolicy(
                AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                    PauseTime=variables['AsgPauseTime'].ref,
                    MinInstancesInService=variables['MinInstancesInService'].
                    ref,
                    MaxBatchSize='1',
                ))

        else:
            update_policy = UpdatePolicy(AutoScalingRollingUpdate=NoValue)

        server_asg = template.add_resource(
            autoscaling.AutoScalingGroup(
                'AutoScaleGroup',
                AutoScalingGroupName=Join('-', [
                    variables['Company'].ref, variables['Application'].ref,
                    variables['Role'].ref, variables['Environment'].ref
                ]),
                UpdatePolicy=update_policy,
                MinSize=variables['ASGMinValue'].ref,
                MaxSize=variables['ASGMaxValue'].ref,
                HealthCheckGracePeriod=variables['HealthCheckGracePeriod'].ref,
                HealthCheckType=variables['HealthCheckType'].ref,
                MetricsCollection=[
                    autoscaling.MetricsCollection(Granularity='1Minute')
                ],
                LaunchConfigurationName=Ref(server_launch_config),
                Tags=asg_tags,
                TargetGroupARNs=If(targetgrouparnsomitted, Ref('AWS::NoValue'),
                                   variables['TargetGroupARNs'].ref),
                VPCZoneIdentifier=variables['AppSubnets'].ref))

        template.add_output(
            Output(
                'ASG',
                Description='Name of autoscaling group',
                Value=Ref(server_asg),
            ))
Example #11
#     SubnetIds=Ref(subnet),
# ))

mydb = t.add_resource(
    DBInstance(
        "MyDB",
        DBName=Ref(dbname),
        AllocatedStorage=Ref(dballocatedstorage),
        DBInstanceClass=Ref(dbclass),
        Engine=Ref(dbengine),
        EngineVersion=Ref(dbengineversion),
        MasterUsername=Ref(dbuser),
        MasterUserPassword=Ref(dbpassword),
        DBSubnetGroupName=Ref(mydbsubnetgroup),
        VPCSecurityGroups=[Ref(myvpcsecuritygroup)],
    ))

t.add_output(
    Output("JDBCConnectionString",
           Description="JDBC connection string for database",
           Value=Join("", [
               "postgresql://",
               Ref(dbuser), ":",
               Ref(dbpassword), "@",
               GetAtt("MyDB", "Endpoint.Address"), ":",
               GetAtt("MyDB", "Endpoint.Port"), "/",
               Ref(dbname)
           ])))

print(t.to_json())
write_to_file("templates/rds-params.yaml", t.to_yaml(clean_up=True))
t = Template()

# Create a Lambda function that will be mapped
code = [
    "var response = require('cfn-response');",
    "exports.handler = function(event, context) {",
    "   context.succeed('foobar!');",
    "   return 'foobar!';",
    "};",
]

# Create the Lambda function
foobar_function = t.add_resource(
    Function(
        "FoobarFunction",
        Code=Code(ZipFile=Join("", code)),
        Handler="index.handler",
        Role=GetAtt("LambdaExecutionRole", "Arn"),
        Runtime="nodejs",
    ))

# Create the Event Target
foobar_target = Target("FoobarTarget",
                       Arn=GetAtt('FoobarFunction', 'Arn'),
                       Id="FooBarFunction1")

# Create the Event Rule
rule = t.add_resource(
    Rule("FoobarRule",
         EventPattern={
             "source": ["aws.ec2"],
                          "20")),
                   SnapshotId=If(UseEBSSnapshot,
                                 Select(str(i), Ref(EBSSnapshotId)), NoValue),
                   Iops=If(UseEBSPIOPS, Select(str(i), Ref(VolumeIOPS)),
                           NoValue),
                   Encrypted=If(UseEBSEncryption,
                                Select(str(i), Ref(EBSEncryption)), NoValue),
                   KmsKeyId=If(UseEBSKMSKey, Select(str(i), Ref(EBSKMSKeyId)),
                               NoValue),
                   Condition=CreateVol))

outputs = [None] * numberOfVol
volToReturn = [None] * numberOfVol
for i in range(numberOfVol):
    volToReturn[i] = If(UseExistingEBSVolume[i],
                        Select(str(i), Ref(EBSVolumeId)), Ref(v[i]))
    if i == 0:
        outputs[i] = volToReturn[i]
    else:
        outputs[i] = If(UseVol[i], Join(",", volToReturn[:(i + 1)]),
                        outputs[i - 1])

t.add_output(
    Output("Volumeids",
           Description="Volume IDs of the resulted EBS volumes",
           Value=outputs[numberOfVol - 1]))

jsonFilePath = "targetPath"
outputfile = open(jsonFilePath, "w")
outputfile.write(t.to_json())
outputfile.close()
Example #14
				ToPort="80",
				CidrIp="0.0.0.0/0",
			),
		],
		
	)
)

Function = t.add_resource(
	Function(
		"Function",
		FunctionName="TestResultSNS",
		Role=LambdaRole,
		Description="Send Test Result Email",
		Runtime="python3.7",
		Code=Code(
			ZipFile=Join("", code)
		),
		Handler="index.lambda_handler",
		MemorySize=Ref(MemorySize),
		Timeout=Ref(Timeout),
		#VpcConfig=VpcConfig(
		#	SecurityGroupIds=[Ref(security_param)],
		#	SubnetIds=[SubnetId],
		#)
	)
)


print(t.to_json())
Example #15
common_bucket_conf = dict(
    VersioningConfiguration=VersioningConfiguration(Status="Enabled"),
    DeletionPolicy="Retain",
    CorsConfiguration=CorsConfiguration(
        CorsRules=[
            CorsRules(
                AllowedOrigins=Split(
                    ";",
                    Join(
                        "",
                        [
                            "https://",
                            domain_name,
                            If(
                                no_alt_domains,
                                # if we don't have any alternate domains, return an empty string
                                "",
                                # otherwise, return the ';https://' that will be needed by the first domain
                                ";https://",
                            ),
                            # then, add all the alternate domains, joined together with ';https://'
                            Join(";https://", domain_name_alternates),
                            # now that we have a string of origins separated by ';', Split() is used to make it into a list again
                        ])),
                AllowedMethods=[
                    "POST",
                    "PUT",
                    "HEAD",
                    "GET",
                ],
                AllowedHeaders=[
                    "*",
Example #16
                },
                'Action': 'es:*',
                'Resource': '*',
                'Condition': {
                    'IpAddress': {
                        'aws:SourceIp': PublicCidrIp
                    }
                }
            }]
        },
    ))

t.add_output(
    Output(
        "DomainArn",
        Description="Domain Arn",
        Value=GetAtt("ElasticsearchCluster", "DomainArn"),
        Export=Export("LogsDomainArn"),
    ))

t.add_output(
    Output("Kibana",
           Description="Kibana url",
           Value=Join("", [
               "https://",
               GetAtt("ElasticsearchCluster", "DomainEndpoint"),
               "/_plugin/kibana/"
           ])))

print(t.to_json())
Example #17
    def add_resources(self):
        """Add resources to template."""
        template = self.template
        variables = self.get_variables()

        additional_tags = {}
        for key in variables['OtherTags']:
            if isinstance(variables['OtherTags'][key], dict):
                tag_name = variables['OtherTags'][key]['Name']
            else:
                tag_name = key
            additional_tags[tag_name] = variables[key].ref

        rdsclientsecuritygroup = template.add_resource(
            ec2.SecurityGroup(
                'RdsClientSecurityGroup',
                VpcId=variables['VpcId'].ref,
                SecurityGroupEgress=[
                    ec2.SecurityGroupRule(IpProtocol='-1',
                                          FromPort='0',
                                          ToPort='65535',
                                          CidrIp='0.0.0.0/0')
                ],
                GroupDescription=Join('-', [
                    variables['ApplicationName'].ref, 'RdsClientSecurityGroup',
                    variables['EnvironmentName'].ref
                ]),
                Tags=Tags(Name=Join('-', [
                    'rds-clients', variables['ApplicationName'].ref,
                    variables['EnvironmentName'].ref
                ]),
                          Environment=variables['EnvironmentName'].ref,
                          Application=variables['ApplicationName'].ref,
                          **additional_tags)))
        template.add_output(
            Output('RdsClientSecurityGroup',
                   Description='The ID of the RDS client security group '
                   'associated with the environment',
                   Value=Ref(rdsclientsecuritygroup)))

        rdsserversecuritygroup = template.add_resource(
            ec2.SecurityGroup(
                'RdsServerSecurityGroup',
                SecurityGroupIngress=[
                    ec2.SecurityGroupRule(
                        IpProtocol='tcp',
                        FromPort=FindInMap('RdsMap',
                                           variables['RdsEngineType'].ref,
                                           'RdsPort'),
                        ToPort=FindInMap('RdsMap',
                                         variables['RdsEngineType'].ref,
                                         'RdsPort'),
                        SourceSecurityGroupId=Ref(rdsclientsecuritygroup))
                ],
                VpcId=variables['VpcId'].ref,
                SecurityGroupEgress=[
                    ec2.SecurityGroupRule(IpProtocol='-1',
                                          FromPort='0',
                                          ToPort='65535',
                                          CidrIp='0.0.0.0/0')
                ],
                GroupDescription=Join('-', [
                    variables['ApplicationName'].ref, 'RdsServerSecurityGroup',
                    variables['EnvironmentName'].ref
                ]),
                Tags=Tags(Name=Join('-', [
                    'rds-server', variables['ApplicationName'].ref,
                    variables['EnvironmentName'].ref
                ]),
                          Environment=variables['EnvironmentName'].ref,
                          Application=variables['ApplicationName'].ref,
                          **additional_tags)))
        template.add_output(
            Output(rdsserversecuritygroup.title,
                   Description='The ID of the RDS server security group '
                   'associated with the rds',
                   Value=Ref(rdsserversecuritygroup)))

        template.add_resource(
            ec2.SecurityGroupIngress(
                'VpnSgIngress',
                Condition='VpnAccessEnabled',
                GroupId=Ref(rdsserversecuritygroup),
                IpProtocol='tcp',
                FromPort=FindInMap('RdsMap', variables['RdsEngineType'].ref,
                                   'RdsPort'),
                ToPort=FindInMap('RdsMap', variables['RdsEngineType'].ref,
                                 'RdsPort'),
                SourceSecurityGroupId=variables['VPNSecurityGroup'].ref))

        rdsdatabaseinstance = template.add_resource(
            rds.DBInstance(
                'RdsDatabaseInstance',
                DBParameterGroupName=If(
                    'CustomParameterGroup',
                    variables['ParameterGroupName'].ref,
                    FindInMap('RdsMap', variables['RdsEngineType'].ref,
                              'RdsParameterGroupName')),
                AllowMajorVersionUpgrade=variables['AllowMajorVersionUpgrade'].
                ref,  # noqa
                MasterUsername=FindInMap('RdsMap',
                                         variables['RdsEngineType'].ref,
                                         'RdsMasterUsername'),
                LicenseModel=FindInMap('RdsMap',
                                       variables['RdsEngineType'].ref,
                                       'RdsLicenseModel'),
                VPCSecurityGroups=[Ref(rdsserversecuritygroup)],
                Engine=FindInMap('RdsMap', variables['RdsEngineType'].ref,
                                 'RdsEngine'),
                MultiAZ=variables['MultiAZ'].ref,
                Tags=Tags(Name=Join('-', [
                    variables['ApplicationName'].ref,
                    variables['EnvironmentName'].ref
                ]),
                          Environment=variables['EnvironmentName'].ref,
                          Application=variables['ApplicationName'].ref,
                          **additional_tags),
                AutoMinorVersionUpgrade=variables['AutoMinorVersionUpgrade'].
                ref,  # noqa
                PreferredBackupWindow='03:00-04:00',
                AllocatedStorage=variables['RdsAllocatedStorage'].ref,
                DBSubnetGroupName=variables['DBSubnetGroupName'].ref,
                PreferredMaintenanceWindow='sat:06:00-sat:07:00',
                EngineVersion=FindInMap('RdsMap',
                                        variables['RdsEngineType'].ref,
                                        'RdsEngineVersion'),
                BackupRetentionPeriod=variables['BackupRetentionDays'].ref,
                StorageType='gp2',
                MasterUserPassword=variables['DBPassword'].ref,
                KmsKeyId=If('KmsKeyEnabled', variables['KmsKey'].ref,
                            Ref('AWS::NoValue')),
                StorageEncrypted=variables['Encrypted'].ref,
                DBInstanceClass=variables['RdsInstanceClass'].ref,
                Port=FindInMap('RdsMap', variables['RdsEngineType'].ref,
                               'RdsPort'),
                DBInstanceIdentifier=If(
                    'IdentifierSpecified',
                    variables['RdsInstanceIdentifier'].ref,
                    Join('-', [
                        variables['ApplicationName'].ref,
                        variables['EnvironmentName'].ref
                    ])),
                DBSnapshotIdentifier=If('SnapshotSpecified',
                                        variables['RdsSnapshotIdentifier'].ref,
                                        Ref('AWS::NoValue'))))

        template.add_resource(
            cloudwatch.Alarm(
                'HighCPUAlarm',
                Condition='SnsTopicSpecified',
                ActionsEnabled='true',
                AlarmActions=variables['SNSTopic'].ref,
                AlarmDescription='CPU Utilization Alarm for RDS',
                AlarmName=Join('-', [
                    variables['EnvironmentName'].ref, 'CPUUtilization', 'RDS',
                    'alarm'
                ]),
                ComparisonOperator=variables['CpuComparisonOperator'].ref,
                OKActions=variables['SNSTopic'].ref,
                EvaluationPeriods=variables['CpuEvaluationPeriods'].ref,
                MetricName='CPUUtilization',
                Namespace='AWS/RDS',
                Period=variables['CpuPeriod'].ref,
                Statistic=variables['CpuStatistic'].ref,
                Threshold=variables['CpuThreshold'].ref))

        template.add_resource(
            cloudwatch.Alarm(
                'FreeStorageSpace',
                Condition='SnsTopicSpecified',
                ActionsEnabled='true',
                AlarmActions=variables['SNSTopic'].ref,
                AlarmDescription='Disk Space Alarm for RDS',
                AlarmName=Join('-', [
                    variables['EnvironmentName'].ref, 'FreeDiskSpace', 'RDS',
                    'alarm'
                ]),
                ComparisonOperator=variables['DiskComparisonOperator'].ref,
                OKActions=variables['SNSTopic'].ref,
                EvaluationPeriods=variables['DiskEvaluationPeriods'].ref,
                MetricName='FreeStorageSpace',
                Namespace='AWS/RDS',
                Period=variables['DiskPeriod'].ref,
                Statistic=variables['DiskStatistic'].ref,
                Threshold=variables['DiskThreshold'].ref))

        template.add_resource(
            cloudwatch.Alarm(
                'FreeableMemory',
                Condition='SnsTopicSpecified',
                ActionsEnabled='true',
                AlarmActions=variables['SNSTopic'].ref,
                AlarmDescription='Free memory Alarm for RDS',
                AlarmName=Join('-', [
                    variables['EnvironmentName'].ref, 'FreeableMemory', 'RDS',
                    'alarm'
                ]),
                ComparisonOperator=variables['MemoryComparisonOperator'].ref,
                OKActions=variables['SNSTopic'].ref,
                EvaluationPeriods=variables['MemoryEvaluationPeriods'].ref,
                MetricName='FreeableMemory',
                Namespace='AWS/RDS',
                Period=variables['MemoryPeriod'].ref,
                Statistic=variables['MemoryStatistic'].ref,
                Threshold=variables['MemoryThreshold'].ref))
        template.add_output(
            Output(
                'RdsDatabaseInstance',
                Description='The name of the RDS instance for the environment',
                Value=GetAtt(rdsdatabaseinstance, 'Endpoint.Address')))
 PolicyName="CodePipelineServicePolicy",
 PolicyDocument={
     "Version":
     "2012-10-17",
     "Statement": [{
         "Effect": "Allow",
         "Action": "iam:PassRole",
         "Resource": "*"
     }, {
         "Effect":
         "Allow",
         "Action": ["codebuild:StartBuild", "codebuild:BatchGetBuilds"],
         "Resource": [
             Join("", [
                 "arn:aws:codebuild:",
                 Ref('AWS::Region'), ":",
                 Ref('AWS::AccountId'), ":project/",
                 Ref(CodeBuildProject)
             ])
         ]
     }, {
         "Effect":
         "Allow",
         "Action": [
             "codecommit:UploadArchive", "codecommit:GetCommit",
             "codecommit:GetUploadArchiveStatus",
             "codecommit:GetBranch", "codecommit:CancelUploadArchive"
         ],
         "Resource": [
             Join("", [
                 "arn:aws:codecommit:",
                 Ref('AWS::Region'), ":",
                ToPort="22",
                CidrIp=myIP,
            ),
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort=ApplicationPort,
                ToPort=ApplicationPort,
                CidrIp=myIP,
            ),
        ],
    ))
ud = Base64(
    Join('\n', [
        "#!/bin/bash", "yum install --enablerepo=epel -y git",
        "pip install --upgrade pip", "ln -s /usr/local/bin/pip /usr/bin/pip",
        "pip install ansible", AnsiblePullCmd,
        "echo '*/10 * * * * root {}' > /etc/cron.d/ansible.pull".format(
            AnsiblePullCmd)
    ]))

t.add_resource(
    IAMPolicy(
        "Policy",
        PolicyName="AllowS3",
        PolicyDocument=Policy(Statement=[
            Statement(Effect=Allow, Action=[Action("s3", "*")], Resource=["*"])
        ]),
        Roles=[Ref("Role")]))

t.add_resource(
    Role("Role",
t.add_description("Effective DevOps in AWS: ECS service - Helloworld")

t.add_parameter(
    Parameter("Tag",
              Type="String",
              Default="latest",
              Description="Tag to deploy"))

t.add_resource(
    TaskDefinition(
        "task",
        ContainerDefinitions=[
            ContainerDefinition(
                Image=Join("", [
                    Ref("AWS::AccountId"), ".dkr.ecr.",
                    Ref("AWS::Region"), ".amazonaws.com", "/",
                    ImportValue("helloworld-repo"), ":",
                    Ref("Tag")
                ]),
                Memory=32,
                Cpu=256,
                Name="helloworld",
                PortMappings=[ecs.PortMapping(ContainerPort=3000)])
        ],
    ))

t.add_resource(
    Role(
        "ServiceRole",
        AssumeRolePolicyDocument=Policy(Statement=[
            Statement(Effect=Allow,
                      Action=[AssumeRole],
Example #21
# Generate the bucket policy that allows users to access your static website
BucketPolicyStaticWebsite = t.add_resource(
    BucketPolicy(
        "BucketPolicyStaticWebsite",
        PolicyDocument={
            "Version":
            IAM_VERSION,
            "Statement": [{
                "Sid":
                "ReadOnly",
                "Effect":
                "Allow",
                "Principal":
                "*",
                "Action": ["s3:GetObject"],
                "Resource": [Join("", [GetAtt(WebsiteBucket, "Arn"), "/*"])]
            }]
        },
        Bucket=Ref(WebsiteBucket),
    ))

# Generate the bucket for storing build artifacts
BuildArtifacts = t.add_resource(Bucket(ARTIFACT_BUCKET))

# Generate the codecommit repository
CodeCommit = t.add_resource(Repository(GIT_REPO, RepositoryName=GIT_REPO))

# Generate the service policy and the service role for CodeBuild
CodeBuildServiceRole = t.add_resource(
    Role(
        "CodeBuildServiceRole",
Example #22
    def test_validate_with_a_join_default(self):
        Parameter('test', Type='String', Default=Join('',
                                                      ['a', 'b'])).validate()
Example #23
s3dnsname = t.add_parameter(
    Parameter(
        "S3DNSName",
        Description="The DNS name of an existing S3 bucket to use as the "
        "Cloudfront distribution origin",
        Type="String",
    ))

myDistribution = t.add_resource(
    Distribution("myDistribution",
                 DistributionConfig=DistributionConfig(
                     Origins=[
                         Origin(Id="Origin 1",
                                DomainName=Ref(s3dnsname),
                                S3OriginConfig=S3OriginConfig())
                     ],
                     DefaultCacheBehavior=DefaultCacheBehavior(
                         TargetOriginId="Origin 1",
                         ForwardedValues=ForwardedValues(QueryString=False),
                         ViewerProtocolPolicy="allow-all"),
                     Enabled=True,
                     HttpVersion='http2')))

t.add_output([
    Output("DistributionId", Value=Ref(myDistribution)),
    Output("DistributionName",
           Value=Join(
               "", ["http://", GetAtt(myDistribution, "DomainName")])),
])

print(t.to_json())
Example #24
                ToPort="22",
                CidrIp=PublicCidrIp,
            ),
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort=ApplicationPort,
                ToPort=ApplicationPort,
                CidrIp="0.0.0.0/0",
            ),
        ],
    ))

ud = Base64(
    Join('\n', [
        "#!/bin/bash", "yum install --enablerepo=epel -y git",
        "pip install ansible", AnsiblePullCmd,
        "echo '*/10 * * * * {}' > /etc/cron.d/ansible-pull".format(
            AnsiblePullCmd)
    ]))

t.add_resource(
    Role("Role",
         AssumeRolePolicyDocument=Policy(Statement=[
             Statement(Effect=Allow,
                       Action=[AssumeRole],
                       Principal=Principal("Service", ["ec2.amazonaws.com"]))
         ])))

t.add_resource(
    InstanceProfile("InstanceProfile", Path="/", Roles=[Ref("Role")]))

t.add_resource(
Example #25
def createCouchbaseInternetGateway(t):
    couchbaseInternetGateway = t.add_resource(ec2.InternetGateway(
        'GATEWAY',
        Tags=Tags(Name=Join('', ['gateway-scalability-', Ref('AWS::Region')]))
    ))
    return couchbaseInternetGateway
                ToPort="22",
                CidrIp=PublicCidrIp,
            ),
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort=ApplicationPort,
                ToPort=ApplicationPort,
                CidrIp="0.0.0.0/0",
            ),
        ],
    ))

ud = Base64(
    Join('\n', [
        "#!/bin/bash", "sudo yum install --enablerepo=epel -y nodejs",
        "wget http://bit.ly/2vESNuc -O /home/ec2-user/helloworld.js",
        "wget http://bit.ly/2vVvT18 -O /etc/init/helloworld.conf",
        "start helloworld"
    ]))

t.add_resource(
    ec2.Instance(
        "instance",
        ImageId="ami-cfe4b2b0",
        InstanceType="t2.micro",
        SecurityGroups=[Ref("SecurityGroup")],
        KeyName=Ref("KeyPair"),
        UserData=ud,
    ))

t.add_output(
    Output(
Example #27
def create_primary_template():
    template = Template(
        Description="Root stack for VERY STRONG Lambda function")

    image_digest = template.add_parameter(
        Parameter("ImageDigest", Type="String", Default=""))

    is_image_digest_defined = "IsImageDigestDefined"
    template.add_condition(is_image_digest_defined,
                           Not(Equals(Ref(image_digest), "")))

    artifact_repository = template.add_resource(
        Repository(
            "ArtifactRepository",
            ImageTagMutability="MUTABLE",
            LifecyclePolicy=LifecyclePolicy(LifecyclePolicyText=json.dumps(
                {
                    "rules": [{
                        "rulePriority": 1,
                        "selection": {
                            "tagStatus": "untagged",
                            "countType": "imageCountMoreThan",
                            "countNumber": 3,
                        },
                        "action": {
                            "type": "expire",
                        },
                    }]
                },
                indent=None,
                sort_keys=True,
                separators=(",", ":"),
            )),
        ))

    artifact_repository_url = Join(
        "/",
        [
            Join(
                ".",
                [
                    AccountId,
                    "dkr",
                    "ecr",
                    Region,
                    URLSuffix,
                ],
            ),
            Ref(artifact_repository),
        ],
    )
    image_uri = Join("@", [artifact_repository_url, Ref(image_digest)])

    artifact_bucket = template.add_resource(
        Bucket(
            "ArtifactBucket",
            BucketEncryption=BucketEncryption(
                ServerSideEncryptionConfiguration=[
                    ServerSideEncryptionRule(
                        BucketKeyEnabled=True,
                        ServerSideEncryptionByDefault=
                        ServerSideEncryptionByDefault(
                            SSEAlgorithm="aws:kms",
                            KMSMasterKeyID=Join(":", [
                                "arn", Partition, "kms", Region, AccountId,
                                "alias/aws/s3"
                            ]),
                        ),
                    )
                ], ),
            LifecycleConfiguration=LifecycleConfiguration(Rules=[
                LifecycleRule(
                    AbortIncompleteMultipartUpload=
                    AbortIncompleteMultipartUpload(DaysAfterInitiation=3, ),
                    Status="Enabled",
                ),
            ], ),
            PublicAccessBlockConfiguration=PublicAccessBlockConfiguration(
                BlockPublicAcls=True,
                BlockPublicPolicy=True,
                IgnorePublicAcls=True,
                RestrictPublicBuckets=True,
            ),
        ))

    deployment_id_stack = template.add_resource(
        Stack(
            "DeploymentId",
            TemplateURL=common.get_template_s3_url(
                Ref(artifact_bucket), deployment_id.create_template()),
            Parameters={
                "ArtifactBucket": Ref(artifact_bucket),
            },
            Condition=is_image_digest_defined,
        ))

    availability_zones_stack = template.add_resource(
        Stack(
            "AvailabilityZones",
            TemplateURL=common.get_template_s3_url(
                Ref(artifact_bucket), availability_zones.create_template()),
            Parameters={
                "DeploymentId": GetAtt(deployment_id_stack, "Outputs.Value"),
                "ImageUri": image_uri,
            },
            Condition=is_image_digest_defined,
        ))

    vpc_stack = template.add_resource(
        Stack(
            "Vpc",
            TemplateURL=common.get_template_s3_url(Ref(artifact_bucket),
                                                   vpc.create_template()),
            Parameters={
                "AvailabilityZones":
                GetAtt(availability_zones_stack, "Outputs.AvailabilityZones"),
            },
            Condition=is_image_digest_defined,
        ))

    lambda_eip_allocator_stack = template.add_resource(
        Stack(
            "LambdaEipAllocator",
            TemplateURL=common.get_template_s3_url(
                Ref(artifact_bucket), lambda_eip_allocator.create_template()),
            Parameters={
                "DeploymentId": GetAtt(deployment_id_stack, "Outputs.Value"),
                "VpcId": GetAtt(vpc_stack, "Outputs.VpcId"),
                "ImageUri": image_uri,
            },
            Condition=is_image_digest_defined,
        ))

    elastic_file_system_stack = template.add_resource(
        Stack(
            "ElasticFileSystem",
            TemplateURL=common.get_template_s3_url(
                Ref(artifact_bucket), elastic_file_system.create_template()),
            Parameters={
                "VpcId":
                GetAtt(vpc_stack, "Outputs.VpcId"),
                "SubnetIds":
                GetAtt(vpc_stack, "Outputs.SubnetIds"),
                "AvailabilityZones":
                GetAtt(availability_zones_stack, "Outputs.AvailabilityZones"),
            },
            Condition=is_image_digest_defined,
        ))

    lambda_function_stack = template.add_resource(
        Stack(
            "LambdaFunction",
            TemplateURL=common.get_template_s3_url(
                Ref(artifact_bucket), lambda_function.create_template()),
            Parameters={
                "DeploymentId":
                GetAtt(deployment_id_stack, "Outputs.Value"),
                "VpcId":
                GetAtt(vpc_stack, "Outputs.VpcId"),
                "SubnetIds":
                GetAtt(vpc_stack, "Outputs.SubnetIds"),
                "FileSystemAccessPointArn":
                GetAtt(elastic_file_system_stack, "Outputs.AccessPointArn"),
                "ImageUri":
                image_uri,
            },
            DependsOn=[lambda_eip_allocator_stack],
            Condition=is_image_digest_defined,
        ))

    image_tagger_stack = template.add_resource(
        Stack(
            "ImageTagger",
            TemplateURL=common.get_template_s3_url(
                Ref(artifact_bucket), image_tagger.create_template()),
            Parameters={
                "DeploymentId": GetAtt(deployment_id_stack, "Outputs.Value"),
                "ArtifactRepository": Ref(artifact_repository),
                "DesiredImageTag": "current-cloudformation",
                "ImageDigest": Ref(image_digest),
                "ImageUri": image_uri,
            },
            DependsOn=list(template.resources),
            Condition=is_image_digest_defined,
        ))

    template.add_output(Output(
        "ArtifactBucket",
        Value=Ref(artifact_bucket),
    ))

    template.add_output(
        Output(
            "ArtifactRepositoryUrl",
            Value=artifact_repository_url,
        ))

    return template
Example #28
t.add_resource(
    InstanceProfile(
        'EC2InstanceProfile',
        Roles=[Ref('EcsClusterRole')],
    ))

t.add_resource(
    LaunchConfiguration(
        'ContainerInstances',
        UserData=Base64(
            Join('', [
                "#!/bin/bash -xe\n", "echo ECS_CLUSTER=",
                Ref('ECSCluster'), " >> /etc/ecs/ecs.config\n",
                "yum install -y aws-cfn-bootstrap\n",
                "/opt/aws/bin/cfn-signal -e $? ", "         --stack ",
                Ref('AWS::StackName'),
                "         --resource ECSAutoScalingGroup ",
                "         --region ",
                Ref('AWS::Region'), "\n"
            ])),
        ImageId='ami-28456852',
        KeyName=Ref("KeyPair"),
        SecurityGroups=[Ref("SecurityGroup")],
        IamInstanceProfile=Ref('EC2InstanceProfile'),
        InstanceType='t2.micro',
        AssociatePublicIpAddress='true',
    ))

t.add_resource(
    AutoScalingGroup(
        'ECSAutoScalingGroup',
Example #29
import tropo_mods.auto_ec2 as auto_ec2
from troposphere import Template, Join

t = Template()
my_instance = auto_ec2.AutoEc2(t, ami_name="ami-0eaec5838478eb0ba", asg=False)

my_instance.add_sg(port="3000", cidrIp="0.0.0.0/0")
my_instance.add_ud(Join("", ["#!/bin/bash -xe\n", "./home/ec2-user/my-app &"]))
my_instance.add_profile(access_to="codepipeline:*")

my_instance.print_to_yaml()
Example #30
keypair = t.add_parameter(
    Parameter(
        "KeyName",
        Description=
        "Name of the SSH key pair that will be used to access the instance",
        Type="String"))
instance = ec2.Instance("Webserver")
instance.ImageId = "ami-e689729e"
instance.InstanceType = "t2.micro"
instance.SecurityGroups = [Ref(sg)]
instance.KeyName = Ref(keypair)
ud = Base64(
    Join('\n', [
        "#!/bin/bash", "sudo yum -y install httpd",
        "sudo echo '<html><body><h1>Welcome to DevOps on AWS</h1></body></html>' > /var/www/html/test.html",
        "sudo service httpd start"
        "sudo chkconfig httpd on"
    ]))
instance.UserData = ud

t.add_resource(instance)

t.add_output(
    Output("InstanceAccess",
           Description="Command to use to access the instance using SSH",
           Value=Join("", [
               "ssh -i ~/.ssh/Lampkey.pem ec2-user@",
               GetAtt(instance, "PublicDnsName")
           ])))

t.add_output(
Example #31
    def add_spot_fleet(self, spot_fleet):
        """ Add spot fleet to stack """
        self.open_userdata()

        launch_specs = []
        default_security_groups = [
            SecurityGroups(GroupId=Ref('IncomingSg')),
            SecurityGroups(GroupId=ImportValue('{}-cluster:DBBadgeSg'.format(self.cluster.get('name')))),
        ]
        for group in self.cluster.get('security_groups', []):
            default_security_groups.append(
                SecurityGroups(GroupId=group)
            )

        for bid in spot_fleet['bids']:

            launch_specs.append(
                LaunchSpecifications(
                    BlockDeviceMappings=self.block_devices,
                    IamInstanceProfile=IamInstanceProfile(Arn=self.instance_role),
                    ImageId=self.ami,
                    InstanceType=bid.get('instance_type'),
                    KeyName=self.keypair,
                    SecurityGroups=default_security_groups,
                    SubnetId=Join(",", self.cluster.get('subnets')),
                    SpotPrice=str(bid.get('price')),
                    UserData=Base64(Sub(self.open_userdata())),
                    Monitoring=Monitoring(Enabled=True),
                    WeightedCapacity=bid.get('weight', 1),
                    TagSpecifications=[SpotFleetTagSpecification(
                        ResourceType='instance',
                        Tags=[
                            Tag("cluster", self.cluster.get('name')),
                            Tag("Name", self.cluster.get('name'))
                        ]
                    )]
                )
            )

        spot_fleet_role = Role(
            "SpotFleetRole",
            AssumeRolePolicyDocument={
                "Statement": [{
                    "Effect": "Allow",
                    "Action": "sts:AssumeRole",
                    "Principal": {"Service": "spotfleet.amazonaws.com"},
                }]
            },
            Policies=[
                Policy(
                    PolicyName="ec2-spot-fleet",
                    PolicyDocument={
                        "Statement": [{
                            "Effect": "Allow",
                            "Action": [
                                "ec2:Describe*",
                                "ec2:CancelSpotFleetRequests",
                                "ec2:CancelSpotInstanceRequests",
                                "ec2:ModifySpotFleetRequest",
                                "ec2:RequestSpotFleet",
                                "ec2:RequestSpotInstances",
                                "ec2:TerminateInstances",
                                "ec2:CreateTags",
                                "iam:PassRole",
                                "iam:ListRoles",
                                "iam:ListInstanceProfiles"
                            ],
                            "Resource": "*"
                        }]
                    }
                )
            ]
        )
        self.template.add_resource(spot_fleet_role)

        spot_resource = SpotFleet(
            "SpotFleet{}".format(sanitize_cfn_resource_name(spot_fleet.get('name'))),
            SpotFleetRequestConfigData=SpotFleetRequestConfigData(
                AllocationStrategy="diversified",
                IamFleetRole=GetAtt("SpotFleetRole", "Arn"),
                LaunchSpecifications=launch_specs,
                TargetCapacity=spot_fleet.get('desired_weight')
            )
        )

        self.template.add_resource(spot_resource)
        if self.instance_base.get('autoscaling'):
            add_scaling(spot_fleet, self.template, self.cluster.get('name'))