コード例 #1
0
def ssm_global():
    """Build a CloudFormation template granting EC2 instances SSM access.

    Creates an IAM role assumable by EC2 with the SSM, S3 and EC2 managed
    policies, wraps it in an instance profile, writes the rendered template
    as ``ssm_global.yml`` next to this module, and returns the YAML string.
    """
    template = Template()

    ssm_role = iam.Role(
        'SsmRole',
        RoleName="SsmRole",
        ManagedPolicyArns=[
            "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore",
            "arn:aws:iam::aws:policy/AmazonS3FullAccess",
            "arn:aws:iam::aws:policy/AmazonEC2FullAccess"
        ],
        AssumeRolePolicyDocument=PolicyDocument(Statement=[
            Statement(Effect=Allow,
                      Action=[Action("sts", "AssumeRole")],
                      Principal=Principal("Service", "ec2.amazonaws.com"))
        ]))

    # Ref() on an IAM role resolves to the role name, which is what
    # InstanceProfile.Roles expects.
    ssm_profile = iam.InstanceProfile('SsmProfile',
                                      Roles=[Ref(ssm_role)],
                                      InstanceProfileName="SsmProfile")

    template.add_resource(ssm_role)
    template.add_resource(ssm_profile)

    # Render once and reuse for both the file and the return value
    # (the original serialized the template twice).
    yaml_body = template.to_yaml()
    out_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            'ssm_global.yml')
    with open(out_path, 'w') as cf_file:
        cf_file.write(yaml_body)

    return yaml_body
コード例 #2
0
ファイル: test_yaml.py プロジェクト: Arvoreen/troposphere
 def test_yaml_long_form(self):
     """to_yaml() honours long_form/clean_up by keyword and positionally."""
     template = Template()
     wait = WaitCondition(
         "MyWaitCondition", Timeout=30, Handle=Sub(cond_string))
     template.add_resource(wait)
     # Default rendering, then long_form and clean_up via keyword and
     # positional argument forms.
     self.assertEqual(cond_normal, template.to_yaml())
     self.assertEqual(cond_long, template.to_yaml(long_form=True))
     self.assertEqual(cond_long, template.to_yaml(False, True))
     self.assertEqual(cond_clean, template.to_yaml(clean_up=True))
     self.assertEqual(cond_clean, template.to_yaml(True))
コード例 #3
0
ファイル: test_basic.py プロジェクト: zully/troposphere
 def test_UpdateReplacePolicy(self):
     """A resource-level UpdateReplacePolicy attribute serializes to YAML."""
     t = Template()
     t.add_resource(Bucket(
         "S3Bucket",
         AccessControl=PublicRead,
         UpdateReplacePolicy='Retain',
     ))
     # The original had a bare t.to_yaml() call here whose result was
     # discarded; the call inside the assertion is sufficient.
     self.assertEqual(t.to_yaml(), test_updatereplacepolicy_yaml)
コード例 #4
0
 def test_yaml_long_form(self):
     """Verify to_yaml's long_form and clean_up options.

     Exercises both keyword and positional forms; the positional asserts
     show the signature order is (clean_up, long_form).
     """
     t = Template()
     t.add_resource(
         WaitCondition("MyWaitCondition", Timeout=30, Handle=Sub(cond_string))
     )
     # Default rendering.
     self.assertEqual(cond_normal, t.to_yaml())
     self.assertEqual(cond_long, t.to_yaml(long_form=True))
     self.assertEqual(cond_long, t.to_yaml(False, True))
     self.assertEqual(cond_clean, t.to_yaml(clean_up=True))
     self.assertEqual(cond_clean, t.to_yaml(True))
コード例 #5
0
    def create_stack_template(self):
        """Build a two-instance EC2 template, write it to self.template_name,
        then create or update the CloudFormation stack and wait for it.

        A ClientError reporting "No updates are to be performed." is treated
        as success; any other client error is re-raised.
        """
        ami_id = "ami-0de53d8956e8dcf80"

        t = Template()
        # The two instances differ only in logical name and tag index,
        # so build them in a loop instead of copy-pasting the setup.
        for index in (1, 2):
            instance = ec2.Instance("myinstance{}".format(index))
            instance.ImageId = ami_id
            instance.SecurityGroupIds = ["sg-006bb1b84c5a7ea6c"]
            instance.KeyName = "eddie.christian"
            instance.InstanceType = "t1.micro"
            instance.Tags = self.instance_tags(index)
            t.add_resource(instance)

        print(t.to_yaml())
        # to_yaml() returns a str, so the file must be opened in text mode;
        # the original 'wb' mode would raise TypeError on write.
        with open(self.template_name, 'w') as f:
            f.write(t.to_yaml())

        with open(self.template_name) as template_fileobj:
            template_data = template_fileobj.read()
        self.cf.validate_template(TemplateBody=template_data)
        params = {
            'StackName': self.stack_name,
            'TemplateBody': template_data,
            'Parameters': [],
        }

        try:
            if self._stack_exists():
                print('Updating {}'.format(self.stack_name))
                self.cf.update_stack(**params)
                waiter = self.cf.get_waiter('stack_update_complete')
            else:
                print('Creating {}'.format(self.stack_name))
                self.cf.create_stack(**params)
                waiter = self.cf.get_waiter('stack_create_complete')
            print("...waiting for stack to be ready...")
            waiter.wait(StackName=self.stack_name)
        except botocore.exceptions.ClientError as ex:
            error_message = ex.response['Error']['Message']
            if error_message == 'No updates are to be performed.':
                print("No changes")
            else:
                raise
コード例 #6
0
    def create_peering(self):
        """Generate a CloudFormation template peering the source and target
        VPCs, add routes to the peer's CIDR in each VPC's public and private
        route tables, and write it to modules/template_peer_vpcs.yaml.
        """
        template = Template()
        template.add_version('2010-09-09')

        # Logical-ID-safe names: alphanumerics only, capitalized.
        source_vpc_name_formatted = ''.join(
            e for e in self.source_vpc_name if e.isalnum()).capitalize()
        target_vpc_name_formatted = ''.join(
            e for e in self.target_vpc_name if e.isalnum()).capitalize()

        vpc_peering_connection = template.add_resource(
            VPCPeeringConnection(
                '{}{}{}VpcPeering'.format(self.stage, source_vpc_name_formatted, target_vpc_name_formatted),
                VpcId=ImportValue("{}{}VpcId".format(self.stage, source_vpc_name_formatted)),
                PeerVpcId=ImportValue("{}{}VpcId".format(self.stage, target_vpc_name_formatted)),
                Tags=Tags(
                    Name="{}_{}_{}_peering".format(self.stage, source_vpc_name_formatted, target_vpc_name_formatted)
                )
            )
        )

        # One route per (VPC, route-table visibility): each VPC's public and
        # private route tables send the peer's CIDR through the peering
        # connection. Replaces four copy-pasted Route blocks.
        route_specs = [
            (source_vpc_name_formatted, 'Public', self.target_cidr_block),
            (source_vpc_name_formatted, 'Private', self.target_cidr_block),
            (target_vpc_name_formatted, 'Public', self.source_cidr_block),
            (target_vpc_name_formatted, 'Private', self.source_cidr_block),
        ]
        for vpc_name, visibility, cidr_block in route_specs:
            template.add_resource(
                Route(
                    '{}{}{}RoutePeeringRule'.format(self.stage, vpc_name, visibility),
                    VpcPeeringConnectionId=Ref(vpc_peering_connection),
                    DestinationCidrBlock=cidr_block,
                    RouteTableId=ImportValue("{}{}{}RouteTableId".format(self.stage, vpc_name, visibility))
                )
            )

        # Context manager guarantees the handle is closed; the original
        # opened the file and never closed it.
        with open("modules/template_peer_vpcs.yaml", 'w') as f:
            print(template.to_yaml(), file=f)
コード例 #7
0
def main(**kwargs):
    """ Create the CFN template and either write to screen or update/create boto3. """
    codebuild = Template()

    for job in config.sections():
        if 'CodeBuild:' not in job:
            continue
        job_title = job.split(':')[1]
        service_role = build_role(template=codebuild,
                                  project_name=job_title).to_dict()
        # Common arguments for both branches; a 'snippet' entry overrides
        # the section used and supplies an explicit environment. This
        # replaces two near-identical build_project() calls.
        project_kwargs = dict(template=codebuild,
                              project_name=job_title,
                              section=job,
                              service_role=service_role['Ref'])
        if 'snippet' in config[job]:
            project_kwargs['section'] = config.get(job, 'snippet')
            project_kwargs['raw_env'] = config.get(job, 'env')
        build_project(**project_kwargs)

    with open("cfn/codebuild_test_projects.yml", 'w') as fh:
        fh.write(codebuild.to_yaml())

    # NOTE(review): args here is a module-level name, not the **kwargs.
    if args.dry_run:
        logging.debug('Dry Run: wrote cfn file, but not calling AWS.')
    else:
        print('Boto functionality goes here.')
コード例 #8
0
def dump_yaml(cfn_file):
    """Write a VPC + subnet CloudFormation template to *cfn_file* as YAML."""
    template = Template()

    # Both CIDR inputs are plain string parameters.
    cidr_params = {}
    for title, description in (
            ("vpcCidrParam", "string of vpc cidr block to use"),
            ("subnetCidrParam", "string of subnet cidr block to use")):
        cidr_params[title] = template.add_parameter(
            Parameter(title, Description=description, Type="String"))

    # Shared tag set applied to every taggable resource below.
    common_tags = Tags(Name=Sub("${AWS::StackName}"),
                       user="******",
                       stelligent_u_lesson='lesson-4-1',
                       stelligent_u_lab='lab-1')

    vpc = template.add_resource(
        ec2.VPC(
            "Vpc",
            CidrBlock=Ref(cidr_params["vpcCidrParam"]),
            EnableDnsSupport=True,
            EnableDnsHostnames=True,
            InstanceTenancy="default",
            Tags=common_tags,
        ))

    subnet = template.add_resource(
        ec2.Subnet(
            "Subnet",
            VpcId=Ref(vpc),
            CidrBlock=Ref(cidr_params["subnetCidrParam"]),
            MapPublicIpOnLaunch=False,
            AvailabilityZone=Select(0, GetAZs()),
            Tags=common_tags,
        ))

    outputs = [
        Output(
            "vpcId",
            Description="InstanceId of the newly created EC2 instance",
            Value=Ref(vpc),
        ),
        Output(
            "SubnetId",
            Description="InstanceId of the newly created EC2 instance",
            Value=Ref(subnet),
        ),
    ]
    template.add_output(outputs)

    with open(cfn_file, 'w') as handle:
        handle.write(template.to_yaml())
コード例 #9
0
ファイル: helpers.py プロジェクト: aws-samples/aws-organized
def provision_stack(stack_name_suffix: str,
                    template: troposphere.Template) -> None:
    """Create or update the ``AWSOrganized-<suffix>`` stack from *template*."""
    stack_name = f"AWSOrganized-{stack_name_suffix}"
    template_body = template.to_yaml(clean_up=True)
    with betterboto_client.ClientContextManager(
            "cloudformation") as cloudformation:
        cloudformation.create_or_update(
            StackName=stack_name,
            TemplateBody=template_body,
            Capabilities=["CAPABILITY_NAMED_IAM"],
        )
コード例 #10
0
ファイル: pipeline.py プロジェクト: suzuxander/samples
def create_pipeline_template():
    """Assemble the CodeBuild project and pipeline, then write ./pipeline.yml."""
    template = Template()

    # The pipeline references the build project, so build that first.
    project = __create_build_project(template)
    __create_pipeline(template, project)

    with open('./pipeline.yml', mode='w') as file:
        file.write(template.to_yaml())
コード例 #11
0
def main():
    """Parse CLI arguments, load the service list, and print an ECS-cluster
    CloudFormation template as JSON or YAML."""
    parser = argparse.ArgumentParser()
    parser.add_argument('location', type=str)
    parser.add_argument('--format', default='json', choices=['json', 'yaml'])

    # Exactly one loader must be chosen; each stores its callable in 'func'.
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument(
        '--meta-git-repo',
        action='store_const',
        const=meta_git_repo,
        dest='func',
        help='indicates location is a url of a meta git repo',
    )
    group.add_argument(
        '--services-file',
        action='store_const',
        const=services_file,
        dest='func',
        help='indicates location is a path of a sierra config file',
    )

    # With no arguments at all, show usage instead of an argparse error.
    if not sys.argv[1:]:
        parser.print_help()
        parser.exit()

    args = parser.parse_args()

    # NOTE(review): the loaded services are not used below in this function;
    # the call is kept for its loading/validation side effects.
    services = args.func(args.location)

    # start of template generation

    template = Template()

    # Return values of add_parameter/add_resource were bound to unused
    # locals ('bucket', 'cluster') in the original; they are not needed.
    template.add_parameter(
        Parameter(
            'TemplateBucket',
            Description='The S3 bucket containing all of the templates.',
            Type='String',
        ))

    template.add_resource(
        cf.Stack(
            'Cluster',
            TemplateURL=Sub(
                'https://s3.amazonaws.com/${TemplateBucket}/templates/ecs-cluster.yaml'
            ),
            Parameters={
                'InstanceType': 't2.micro',
            }))

    if args.format == 'json':
        result = template.to_json()
    else:
        result = template.to_yaml()

    print(result)
コード例 #12
0
def create_alb_template():
    """Write an ALB + target group + listener template to ./alb.yml."""
    template = Template()

    # All four inputs are plain string parameters; keep Refs handy by title.
    param_refs = {}
    for title in ('Vpc', 'SubnetA', 'SubnetB', 'Ec2Instance'):
        param_refs[title] = Ref(template.add_parameter(
            parameter=Parameter(title=title, Type='String')))

    security_group = template.add_resource(
        resource=SecurityGroup(title='SampleSecurityGroup',
                               GroupDescription='sample-security-group',
                               SecurityGroupIngress=[{
                                   'IpProtocol': 'tcp',
                                   'FromPort': 80,
                                   'ToPort': 80,
                                   'CidrIp': '0.0.0.0/0'
                               }],
                               VpcId=param_refs['Vpc']))

    load_balancer = template.add_resource(resource=LoadBalancer(
        title='SampleLoadBalancer',
        Name='sample-alb',
        Subnets=[param_refs['SubnetA'], param_refs['SubnetB']],
        SecurityGroups=[Ref(security_group)],
    ))

    # The target group forwards port 80 traffic to the given EC2 instance.
    target_group = template.add_resource(resource=TargetGroup(
        title='SampleTargetGroup',
        Targets=[TargetDescription(
            Id=param_refs['Ec2Instance'],
            Port=80,
        )],
        VpcId=param_refs['Vpc'],
        Name='sample-target-group',
        Port=80,
        Protocol='HTTP',
    ))

    template.add_resource(resource=Listener(
        title='SampleListener',
        DefaultActions=[
            Action(TargetGroupArn=Ref(target_group), Type='forward')
        ],
        LoadBalancerArn=Ref(load_balancer),
        Port=80,
        Protocol='HTTP',
    ))

    with open('./alb.yml', mode='w') as file:
        file.write(template.to_yaml())
コード例 #13
0
 def test_s3_bucket(self):
     """An S3 bucket plus an Output referencing it renders the expected YAML."""
     template = Template()
     template.add_description("S3 Bucket Example")
     bucket = s3.Bucket("S3Bucket", AccessControl=s3.PublicRead,)
     template.add_resource(bucket)
     template.add_output(Output(
         "BucketName",
         Value=Ref(bucket),
         Description="Name of S3 bucket to hold website content"
     ))
     self.assertEqual(s3_bucket_yaml, template.to_yaml())
コード例 #14
0
def create_vpc_template():
    """Write a two-AZ public VPC template (IGW, subnets, routing) to ./vpc.yml."""
    template = Template()

    # CIDR inputs with their defaults; insertion order matches the original.
    cidr_defaults = (('VpcCidr', '192.168.0.0/16'),
                     ('SubnetCidr1', '192.168.1.0/24'),
                     ('SubnetCidr2', '192.168.2.0/24'))
    params = {}
    for title, default in cidr_defaults:
        params[title] = template.add_parameter(parameter=Parameter(
            title=title, Type='String', Default=default))

    vpc = template.add_resource(resource=VPC(
        title='SampleVpc', CidrBlock=Ref(params['VpcCidr']),
        EnableDnsHostnames=True))

    igw = template.add_resource(resource=InternetGateway(title='SampleIgw'))

    template.add_resource(resource=VPCGatewayAttachment(
        title='SampleAttachment', VpcId=Ref(vpc), InternetGatewayId=Ref(igw)))

    # Two public subnets in distinct availability zones.
    subnets = []
    for suffix, zone, cidr_title in (('A', 'us-east-1a', 'SubnetCidr1'),
                                     ('B', 'us-east-1b', 'SubnetCidr2')):
        subnets.append(template.add_resource(
            resource=Subnet(title='SampleSubnet' + suffix,
                            AvailabilityZone=zone,
                            CidrBlock=Ref(params[cidr_title]),
                            MapPublicIpOnLaunch=True,
                            VpcId=Ref(vpc))))

    # NOTE(review): 'SampleRoteTable*' logical IDs look like typos for
    # 'RouteTable', but renaming them would replace deployed resources,
    # so they are kept verbatim.
    route_table = template.add_resource(
        resource=RouteTable(title='SampleRoteTable', VpcId=Ref(vpc)))

    for suffix, subnet in zip(('A', 'B'), subnets):
        template.add_resource(resource=SubnetRouteTableAssociation(
            title='SampleRoteTableAssociation' + suffix,
            RouteTableId=Ref(route_table),
            SubnetId=Ref(subnet)))

    template.add_resource(resource=Route(title='SampleRoute',
                                         DestinationCidrBlock='0.0.0.0/0',
                                         GatewayId=Ref(igw),
                                         RouteTableId=Ref(route_table)))

    with open('./vpc.yml', mode='w') as file:
        file.write(template.to_yaml())
コード例 #15
0
class Base(metaclass=ABCMeta):
    """Abstract base for template-building classes.

    Subclasses implement create_template(), which is expected to populate
    ``self.tpl``; to_yaml() then renders the assembled troposphere Template.
    """

    def __init__(self, sceptre_user_data: Dict):
        # Raw sceptre user data, kept for subclasses to read.
        self.sceptre_user_data = sceptre_user_data
        self.tpl = Template()
        # Build eagerly so the instance is renderable immediately.
        self.create_template()

    @abstractmethod
    def create_template(self):
        """Populate self.tpl with resources; implemented by subclasses."""
        pass

    def to_yaml(self) -> str:
        """Return the assembled template rendered as a YAML string."""
        return self.tpl.to_yaml()
コード例 #16
0
def create_autoscaling_stack(projectName):
    """Create a LocalStack CloudFormation stack containing an AutoScalingGroup.

    Builds a template (security group, launch configuration, ASG), submits it
    to the local CloudFormation endpoint, waits for the stack to complete and
    returns the stack description dict.

    Raises:
        Exception: if the stack does not reach a complete state in time.
    """
    cloudformationClient = boto3.client('cloudformation',
                                        endpoint_url='http://localhost:4581',
                                        region_name='us-west-2')

    t = Template()

    t.set_version('2010-09-09')

    t.set_description(
        "LocalStackTests Task two cloud formation template of AutoScalingGroup "
    )

    ssh_rule = create_security_group_rule("tcp", "22", "22", "0.0.0.0/0")
    http_rule = create_security_group_rule(
        "tcp", "80", "80", "0.0.0.0/0")  # assume we are running on port 80
    ec2SecurityGroup = create_security_group_resource(
        projectName + "EC2SecurityGroup", [ssh_rule, http_rule],
        "EC2 Security Group")

    launchConfigName = projectName + "LaunchConfiguration"

    launchConfig = create_launch_configuration_resource(
        launchConfigName, [Ref(ec2SecurityGroup)])

    autoscaling = create_autoscaling_resource(projectName, launchConfig)

    for resource in (ec2SecurityGroup, launchConfig, autoscaling):
        t.add_resource(resource)

    stackName = projectName + 'Task2'

    try:
        # Return value is not needed; the stack is queried again below.
        cloudformationClient.create_stack(
            StackName=stackName, TemplateBody=t.to_yaml())
    except Exception:
        # TODO: check other exceptions. Deliberately best-effort so that an
        # already-existing stack does not abort the run; describe_stacks
        # below surfaces real failures.
        pass

    stackReady = wait_resource(cloudformationClient.describe_stacks,
                               check_cloudformation_stack_complete,
                               10,
                               StackName=stackName)

    if stackReady:
        res = cloudformationClient.describe_stacks(StackName=stackName)
        return res['Stacks'][0]
    else:
        raise Exception(
            "Fails to get recently created stack, try to wait for more time")
コード例 #17
0
def generate(template: Template, template_path: str,
             template_format: TemplateFormat):
    """Render *template* to *template_path* in the requested format.

    Creates any missing parent directories first.

    Args:
        template: the troposphere template to serialize.
        template_path: destination file path.
        template_format: TemplateFormat.YAML for YAML; anything else is JSON.
    """
    print(f'Generating template: {template_path}')

    pathlib.Path(os.path.dirname(os.path.abspath(template_path))).mkdir(
        parents=True, exist_ok=True)

    # Pick the serializer first so the write path is not duplicated.
    if template_format == TemplateFormat.YAML:
        body = template.to_yaml()
    else:
        body = template.to_json()
    with open(template_path, 'w') as f:
        f.write(body)
コード例 #18
0
def create_vpc_template():
    """Write a VPC with public/private/DMZ subnets to ./vpc.yml."""
    template = Template()

    cidr_param = template.add_parameter(parameter=Parameter(
        title='VpcCidr', Type='String', Default='192.168.0.0/16'))

    sample_vpc = template.add_resource(
        resource=VPC(title='SampleVpc', CidrBlock=Ref(cidr_param)))

    # The DMZ helper also needs the public subnet, so capture its result.
    public_subnet = __create_public_subnet(template, sample_vpc)
    __create_private_subnet(template, sample_vpc)
    __create_dmz_subnet(template, sample_vpc, public_subnet)

    with open('./vpc.yml', mode='w') as file:
        file.write(template.to_yaml())
コード例 #19
0
class TestCfTemplate(object):
    """Tests that generated resources can be deployed via CloudFormation."""

    def setup_method(self):
        # Fresh template for each test (pytest per-test setup hook).
        self.template = Template()

    @mock_cloudformation
    def test_ec2_resource(self, cf_client, vpc_data):
        # Build a bastion EC2 instance resource from fixture-style IDs.
        ec2_resource = cf_resources.ec2_instance(name='test-ec2-bastion-cli',
                                                 ami_id='ami-12345',
                                                 keyname='foo',
                                                 instance_type='t2.micro',
                                                 sg_ids=['sg-12345'],
                                                 subnet_id='subnet-12345')

        self.template.add_resource(ec2_resource)
        # Creating the stack against the mocked client exercises the
        # rendered YAML end to end.
        cf_client.create_stack(StackName='test_ec2_resource',
                               TemplateBody=self.template.to_yaml())
コード例 #20
0
ファイル: function.py プロジェクト: suzuxander/samples
def create_function_template():
    """Write a SAM Lambda function template to ./function.yml."""
    template = Template()
    template.set_transform('AWS::Serverless-2016-10-31')

    # Role ARN is imported from the common-resources stack export.
    sample_function = Function(
        title='SampleLambdaFunction',
        CodeUri='.',
        FunctionName='sample-lambda-function',
        Handler='lambda_function.lambda_handler',
        Role=ImportValue(
            CommonResource.ExportName.LAMBDA_SERVICE_ROLE_ARN.value),
        Runtime='python3.7',
    )
    template.add_resource(resource=sample_function)

    with open('./function.yml', mode='w') as file:
        file.write(template.to_yaml())
コード例 #21
0
ファイル: ec2.py プロジェクト: suzuxander/samples
def create_ec2_template():
    """Write an EC2 instance + security group template to ./ec2.yml."""
    template = Template()

    def add_string_param(title, default=None):
        # Every parameter here is a plain String; Default only when given.
        kwargs = {'title': title, 'Type': 'String'}
        if default is not None:
            kwargs['Default'] = default
        return template.add_parameter(parameter=Parameter(**kwargs))

    vpc = add_string_param('Vpc')
    subnet = add_string_param('Subnet')
    ami_image = add_string_param('AmiImage', default='ami-0e2ff28bfb72a4e45')
    key_name = add_string_param('KeyName')
    my_ip = add_string_param('MyIp')

    # HTTP open to the world; SSH restricted to the caller-supplied IP.
    ingress_rules = [{
        'IpProtocol': 'tcp',
        'FromPort': 80,
        'ToPort': 80,
        'CidrIp': '0.0.0.0/0',
    }, {
        'IpProtocol': 'tcp',
        'FromPort': 22,
        'ToPort': 22,
        'CidrIp': Ref(my_ip),
    }]
    security_group = template.add_resource(
        resource=SecurityGroup(title='SampleSecurityGroup',
                               GroupDescription='sample',
                               VpcId=Ref(vpc),
                               SecurityGroupIngress=ingress_rules))

    template.add_resource(resource=Instance(
        title='SampleEc2Instance',
        SubnetId=Ref(subnet),
        SecurityGroupIds=[Ref(security_group)],
        InstanceType='t2.micro',
        ImageId=Ref(ami_image),
        KeyName=Ref(key_name),
    ))

    with open('./ec2.yml', mode='w') as file:
        file.write(template.to_yaml())
コード例 #22
0
ファイル: driver.py プロジェクト: suzuxander/samples
def create_template():
    """Write an EC2 template (two subnets, SG, one instance) to ./driver.yml."""
    template = Template()

    vpc = template.add_parameter(
        parameter=Parameter(title='Vpc', Type='String'))
    key_name = template.add_parameter(
        parameter=Parameter(title='KeyName', Type='String'))

    # Two public subnets across availability zones.
    subnet_specs = (('SampleSubnetA', 'us-east-1a', '192.168.10.0/24'),
                    ('SampleSubnetB', 'us-east-1b', '192.168.11.0/24'))
    subnets = [template.add_resource(resource=Subnet(title=title,
                                                     AvailabilityZone=zone,
                                                     CidrBlock=cidr,
                                                     MapPublicIpOnLaunch=True,
                                                     VpcId=Ref(vpc)))
               for title, zone, cidr in subnet_specs]

    security_group = template.add_resource(
        resource=SecurityGroup(title='SampleSecurityGroup',
                               GroupDescription='sample',
                               VpcId=Ref(vpc),
                               SecurityGroupIngress=[{
                                   'IpProtocol': 'tcp',
                                   'FromPort': 80,
                                   'ToPort': 80,
                                   'CidrIp': '0.0.0.0/0',
                               }]))

    # The instance is placed in the first (us-east-1a) subnet only.
    template.add_resource(resource=Instance(
        title='SampleEc2Instance',
        SubnetId=Ref(subnets[0]),
        SecurityGroupIds=[Ref(security_group)],
        InstanceType='t2.micro',
        ImageId='ami-0e2ff28bfb72a4e45',
        KeyName=Ref(key_name),
    ))

    with open('./driver.yml', mode='w') as file:
        file.write(template.to_yaml())
コード例 #23
0
ファイル: function.py プロジェクト: suzuxander/samples
def create_cloud_front_template():
    """Write a SAM template (Lambda service role + function) to ./function.yml."""
    template = Template()
    template.set_transform('AWS::Serverless-2016-10-31')

    # Trust policy: only the Lambda service may assume this role.
    assume_role_document = {
        "Statement": [{
            "Effect": "Allow",
            "Principal": {
                "Service": ['lambda.amazonaws.com']
            },
            "Action": ["sts:AssumeRole"]
        }]
    }
    # Inline policy granting all lambda:* actions on any resource.
    lambda_policy_document = {
        "Version": "2012-10-17",
        "Statement": [{
            "Action": 'lambda:*',
            "Resource": '*',
            "Effect": "Allow"
        }]
    }

    service_role = template.add_resource(
        resource=Role(title='SampleLambdaServiceRole',
                      RoleName='sample-lambda-service-role',
                      Path='/',
                      AssumeRolePolicyDocument=assume_role_document,
                      Policies=[
                          Policy(PolicyName="sample-policy",
                                 PolicyDocument=lambda_policy_document)
                      ]))

    template.add_resource(resource=Function(
        title='SampleLambdaFunction',
        AutoPublishAlias='sample',
        CodeUri='.',
        FunctionName='sample-lambda-function',
        Handler='lambda_function.lambda_handler',
        Role=GetAtt(logicalName=service_role, attrName='Arn'),
        Runtime='python3.7',
    ))

    with open('./function.yml', mode='w') as file:
        file.write(template.to_yaml())
コード例 #24
0
ファイル: template.py プロジェクト: fnAttic/statemachine-aws
def main():
    """main function: assemble FSM and task resources into one SAM template
    and print it as YAML."""
    template = Template()
    template.set_transform('AWS::Serverless-2016-10-31')

    # Resource lists from the state machine and its task handlers, in the
    # same order the original concatenated them.
    resource_sources = [
        document_fsm.DocumentReviewMachine.cf_resources(),
        document_fsm.launch.cf_resources(),
        document_fsm.transition.cf_resources(),
        document_fsm.info.cf_resources(),
        document_tasks.archive_document.cf_resources(),
        document_tasks.delete_document.cf_resources(),
        document_tasks.notify_reviewer.cf_resources(),
        document_tasks.notify_uploader.cf_resources(),
        document_tasks.summarize_document.cf_resources(),
    ]
    for resource_list in resource_sources:
        for resource in resource_list:
            template.add_resource(resource)

    # output yaml template
    print(template.to_yaml())
コード例 #25
0
def main():
    """Print the GeoWave Admin UI dev-server template as YAML."""
    template = Template('GeoWave Admin UI dev server')

    template.add_parameter(PARAMS)

    # Resource builders, invoked in the original order.
    for builder in (create_dnsrecords, create_loadbalancer, create_instance,
                    create_routing, create_securitygroups, create_subnets,
                    create_vpc):
        template.add_resource(builder())

    template.add_output(Output('application', Value=_subdomain_for_application()))
    template.add_output(Output('instance', Value=_subdomain_for_instance()))
    template.add_output(Output('jenkins', Value=_subdomain_for_jenkins()))
    template.add_output(Output('loadbalancer', Value=GetAtt('tlsFrontend',
                                                            'DNSName')))

    print(template.to_yaml())
コード例 #26
0
ファイル: function.py プロジェクト: suzuxander/samples
def create_function_template():
    """Write a SAM Lambda@Edge function template to ./function.yml, exporting
    the function's ARN as a template output."""
    template = Template()
    template.set_transform('AWS::Serverless-2016-10-31')

    # The resource is a Lambda function; the original local was misleadingly
    # named 'bucket'.
    function = template.add_resource(resource=Function(
        title='SampleLambdaFunction',
        AutoPublishAlias='sample',
        CodeUri='.',
        FunctionName='sample-lambda-edge-function',
        Handler='lambda_function.lambda_handler',
        Role=ImportValue(
            CommonResource.ExportName.LAMBDA_EDGE_SERVICE_ROLE_ARN.value),
        Runtime='python3.7',
    ))

    template.add_output(output=Output(title=function.title,
                                      Value=GetAtt(function, 'Arn'),
                                      Export=Export(name=get_export_name())))

    with open('./function.yml', mode='w') as file:
        file.write(template.to_yaml())
コード例 #27
0
def instantiate_CF_template(template: Template,
                            stack_name: str = "unnamed",
                            **params) -> None:
    """Validate *template*, then create or update the named stack and wait.

    A stack in ROLLBACK_COMPLETE cannot be updated, so it is deleted and
    recreated. Extra keyword arguments override the default create/update
    parameters. Blocks until the stack operation completes.
    """
    client = boto3.client('cloudformation')
    logging.info(f"Validating stack {stack_name}")
    tpl_yaml = template.to_yaml()
    # validate_template raises on an invalid body; its result is not needed.
    client.validate_template(TemplateBody=tpl_yaml)
    logging.info(f"Creating stack {stack_name}")
    stack_params = dict(
        StackName=stack_name,
        TemplateBody=tpl_yaml,
        Parameters=[],
        Capabilities=['CAPABILITY_IAM'],
        #OnFailure = 'DELETE',
    )
    stack_params.update(params)

    # Decide create vs update once; the original duplicated the
    # create/waiter block in three branches.
    update = False
    if stack_exists(client, stack_name):
        logging.warning(f"Stack '{stack_name}' already exists")
        stacks = client.describe_stacks(StackName=stack_name)
        status = stacks['Stacks'][0]['StackStatus']
        if status == 'ROLLBACK_COMPLETE':
            # Stacks in Rollback complete can't be updated.
            #input("Press enter to delete the stack (is in ROLLBACK_COMPLETE state) or ^C to abort...")
            logging.info("Deleting stack...")
            delete_stack(client, stack_name)
        else:
            update = True

    if update:
        client.update_stack(**stack_params)
        waiter = client.get_waiter('stack_update_complete')
        logging.info("Waiting for stack update...")
    else:
        client.create_stack(**stack_params)
        waiter = client.get_waiter('stack_create_complete')
        logging.info("Waiting for stack create...")
    waiter.wait(StackName=stack_name)
コード例 #28
0
ファイル: create_project.py プロジェクト: phillipberndt/s2n
def main(**kwargs):
    """ Create the CFN template and either write to screen or update/create boto3. """
    codebuild = Template()
    codebuild.set_version('2010-09-09')
    # Create a single CloudWatch Event role to allow codebuild:startBuild
    cw_event_role = build_cw_cb_role(codebuild)

    # TODO: There is a problem with the resource statement
    #logging.info('Creating github role: {}', build_github_role(codebuild))
    for job in config.sections():
        if 'CodeBuild:' not in job:
            continue
        job_title = job.split(':')[1]
        service_role = build_codebuild_role(
            template=codebuild, project_name=job_title).to_dict()
        # Common arguments for both branches; a 'snippet' entry overrides
        # the section used and supplies an explicit environment. This
        # replaces two near-identical build_project() calls.
        project_kwargs = dict(template=codebuild,
                              project_name=job_title,
                              section=job,
                              service_role=service_role['Ref'])
        if 'snippet' in config[job]:
            project_kwargs['section'] = config.get(job, 'snippet')
            project_kwargs['raw_env'] = config.get(job, 'env')
        build_project(**project_kwargs)
        build_cw_event(template=codebuild,
                       project_name=job_title,
                       role=cw_event_role)

    with open(args.output_dir + "/s2n_codebuild_projects.yml", 'w') as fh:
        fh.write(codebuild.to_yaml())

    if args.dry_run:
        logging.debug('Dry Run: wrote cfn file, but not calling AWS.')
    else:
        print('Boto functionality goes here.')
コード例 #29
0
class CreateTemplate:
    """Thin wrapper around a troposphere Template holding S3 size alarms."""

    def __init__(self):
        # Backing troposphere template that all alarms are added to.
        self.template = Template()

    def add_alarm(self, alarmName):
        """Add a CloudWatch alarm that fires when BucketSizeBytes exceeds 0.

        :param alarmName: logical resource ID to use for the alarm.
        """
        self.template.add_resource(Alarm(
            alarmName,
            ComparisonOperator='GreaterThanThreshold',
            Statistic='Maximum',
            EvaluationPeriods=1,
            MetricName='BucketSizeBytes',
            # Bug fix: S3 storage metrics are published under the 'AWS/S3'
            # namespace, not 'S3' -- an alarm on namespace 'S3' never matches
            # any datapoints and stays in INSUFFICIENT_DATA forever.
            Namespace='AWS/S3',
            # NOTE(review): BucketSizeBytes is reported once per day, so a
            # 60-second period is unlikely to be what was intended, and the
            # alarm has no BucketName/StorageType dimensions -- confirm intent
            # before deploying.
            Period='60',
            Threshold=0
        ))

    def to_yaml(self):
        """Render the template as CloudFormation YAML."""
        return self.template.to_yaml()

    def to_json(self):
        """Render the template as a JSON string."""
        return json.dumps(self.to_dict())

    def to_dict(self):
        """Return the template as a plain dict."""
        return self.template.to_dict()
コード例 #30
0
def main(**params) -> dict:
    """Render a CloudFormation YAML template for a SOCA compute-node fleet.

    Builds an EC2 LaunchTemplate (with a bootstrap UserData shell script), an
    AutoScalingGroup (optionally with a mixed on-demand/spot policy), and
    optional FSx for Lustre, placement-group and anonymous-metrics resources.

    Keyword Args:
        params: job/cluster settings read throughout the body, e.g. BaseOS,
            InstanceType ('+'-separated list), DesiredCapacity, SpotPrice,
            FSxLustreConfiguration (dict), ClusterId, JobId, ...
            # NOTE(review): full schema is defined by the caller -- not
            # visible from this file.

    Returns:
        dict: {'success': True, 'output': <YAML string>} on success, or
        {'success': False, 'output': <error description>} on any exception.
    """
    try:
        # Metadata
        t = Template()
        t.set_version("2010-09-09")
        t.set_description("(SOCA) - Base template to deploy compute nodes.")
        allow_anonymous_data_collection = params["MetricCollectionAnonymous"]
        debug = False
        mip_usage = False
        # '+'-separated instance types; the first entry is the default type.
        instances_list = params["InstanceType"].split("+")
        asg_lt = asg_LaunchTemplate()
        ltd = LaunchTemplateData("NodeLaunchTemplateData")
        mip = MixedInstancesPolicy()
        stack_name = Ref("AWS::StackName")

        # Begin LaunchTemplateData
        # Bootstrap script: most values are baked in via string concatenation;
        # the ${AWS::StackName}/${AWS::Region} placeholders are resolved later
        # by the Sub() wrapper when CloudFormation renders the template.
        UserData = '''#!/bin/bash -xe
export PATH=$PATH:/usr/local/bin
if [[ "''' + params['BaseOS'] + '''" == "centos7" ]] || [[ "''' + params['BaseOS'] + '''" == "rhel7" ]];
    then
        EASY_INSTALL=$(which easy_install-2.7)
        $EASY_INSTALL pip
        PIP=$(which pip2.7)
        $PIP install awscli
        yum install -y nfs-utils # enforce install of nfs-utils
else
     # Upgrade awscli on ALI (do not use yum)
     EASY_INSTALL=$(which easy_install-2.7)
     $EASY_INSTALL pip
     PIP=$(which pip)
     $PIP install awscli --upgrade 
fi
if [[ "''' + params['BaseOS'] + '''" == "amazonlinux2" ]];
    then
        /usr/sbin/update-motd --disable
fi

GET_INSTANCE_TYPE=$(curl http://169.254.169.254/latest/meta-data/instance-type)
echo export "SOCA_CONFIGURATION="''' + str(params['ClusterId']) + '''"" >> /etc/environment
echo export "SOCA_BASE_OS="''' + str(params['BaseOS']) + '''"" >> /etc/environment
echo export "SOCA_JOB_QUEUE="''' + str(params['JobQueue']) + '''"" >> /etc/environment
echo export "SOCA_JOB_OWNER="''' + str(params['JobOwner']) + '''"" >> /etc/environment
echo export "SOCA_JOB_NAME="''' + str(params['JobName']) + '''"" >> /etc/environment
echo export "SOCA_JOB_PROJECT="''' + str(params['JobProject']) + '''"" >> /etc/environment
echo export "SOCA_VERSION="''' + str(params['Version']) + '''"" >> /etc/environment
echo export "SOCA_JOB_EFA="''' + str(params['Efa']).lower() + '''"" >> /etc/environment
echo export "SOCA_JOB_ID="''' + str(params['JobId']) + '''"" >> /etc/environment
echo export "SOCA_SCRATCH_SIZE=''' + str(params['ScratchSize']) + '''" >> /etc/environment
echo export "SOCA_INSTALL_BUCKET="''' + str(params['S3Bucket']) + '''"" >> /etc/environment
echo export "SOCA_INSTALL_BUCKET_FOLDER="''' + str(params['S3InstallFolder']) + '''"" >> /etc/environment
echo export "SOCA_FSX_LUSTRE_BUCKET="''' + str(params['FSxLustreConfiguration']['fsx_lustre']).lower() + '''"" >> /etc/environment
echo export "SOCA_FSX_LUSTRE_DNS="''' + str(params['FSxLustreConfiguration']['existing_fsx']).lower() + '''"" >> /etc/environment
echo export "SOCA_INSTANCE_TYPE=$GET_INSTANCE_TYPE" >> /etc/environment
echo export "SOCA_INSTANCE_HYPERTHREADING="''' + str(params['ThreadsPerCore']).lower() + '''"" >> /etc/environment
echo export "SOCA_HOST_SYSTEM_LOG="/apps/soca/''' + str(params['ClusterId']) + '''/cluster_node_bootstrap/logs/''' + str(params['JobId']) + '''/$(hostname -s)"" >> /etc/environment
echo export "AWS_STACK_ID=${AWS::StackName}" >> /etc/environment
echo export "AWS_DEFAULT_REGION=${AWS::Region}" >> /etc/environment


source /etc/environment
AWS=$(which aws)

# Give yum permission to the user on this specific machine
echo "''' + params['JobOwner'] + ''' ALL=(ALL) /bin/yum" >> /etc/sudoers

mkdir -p /apps
mkdir -p /data

# Mount EFS
echo "''' + params['EFSDataDns'] + ''':/ /data nfs4 nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2 0 0" >> /etc/fstab
echo "''' + params['EFSAppsDns'] + ''':/ /apps nfs4 nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2 0 0" >> /etc/fstab
mount -a 

# Configure NTP
yum remove -y ntp
yum install -y chrony
mv /etc/chrony.conf  /etc/chrony.conf.original
echo -e """
# use the local instance NTP service, if available
server 169.254.169.123 prefer iburst minpoll 4 maxpoll 4

# Use public servers from the pool.ntp.org project.
# Please consider joining the pool (http://www.pool.ntp.org/join.html).
# !!! [BEGIN] SOCA REQUIREMENT
# You will need to open UDP egress traffic on your security group if you want to enable public pool
#pool 2.amazon.pool.ntp.org iburst
# !!! [END] SOCA REQUIREMENT
# Record the rate at which the system clock gains/losses time.
driftfile /var/lib/chrony/drift

# Allow the system clock to be stepped in the first three updates
# if its offset is larger than 1 second.
makestep 1.0 3

# Specify file containing keys for NTP authentication.
keyfile /etc/chrony.keys

# Specify directory for log files.
logdir /var/log/chrony

# save data between restarts for fast re-load
dumponexit
dumpdir /var/run/chrony
""" > /etc/chrony.conf
systemctl enable chronyd

# Prepare  Log folder
mkdir -p $SOCA_HOST_SYSTEM_LOG
echo "@reboot /bin/bash /apps/soca/$SOCA_CONFIGURATION/cluster_node_bootstrap/ComputeNodePostReboot.sh >> $SOCA_HOST_SYSTEM_LOG/ComputeNodePostInstall.log 2>&1" | crontab -
$AWS s3 cp s3://$SOCA_INSTALL_BUCKET/$SOCA_INSTALL_BUCKET_FOLDER/scripts/config.cfg /root/
/bin/bash /apps/soca/$SOCA_CONFIGURATION/cluster_node_bootstrap/ComputeNode.sh ''' + params['SchedulerHostname'] + ''' >> $SOCA_HOST_SYSTEM_LOG/ComputeNode.sh.log 2>&1'''

        # EBS optimization is disabled if ANY listed type is t2.*; otherwise
        # CpuOptions is (re)assigned per non-t2 type -- the last iteration wins.
        ltd.EbsOptimized = True
        for instance in instances_list:
            if "t2." in instance:
                ltd.EbsOptimized = False
            else:
                # t2 does not support CpuOptions
                ltd.CpuOptions = CpuOptions(
                    CoreCount=int(params["CoreCount"]),
                    ThreadsPerCore=1 if params["ThreadsPerCore"] is False else 2)

        ltd.IamInstanceProfile = IamInstanceProfile(Arn=params["ComputeNodeInstanceProfileArn"])
        ltd.KeyName = params["SSHKeyPair"]
        ltd.ImageId = params["ImageId"]
        # Pure-spot request only when no mixed-instances spot allocation is set;
        # the mixed case is handled below via InstancesDistribution.
        if params["SpotPrice"] is not False and params["SpotAllocationCount"] is False:
            ltd.InstanceMarketOptions = InstanceMarketOptions(
                MarketType="spot",
                SpotOptions=SpotOptions(
                    MaxPrice=Ref("AWS::NoValue") if params["SpotPrice"] == "auto" else str(params["SpotPrice"])
                    # auto -> cap at OD price
                )
            )
        ltd.InstanceType = instances_list[0]
        ltd.NetworkInterfaces = [NetworkInterfaces(
            InterfaceType="efa" if params["Efa"] is not False else Ref("AWS::NoValue"),
            DeleteOnTermination=True,
            DeviceIndex=0,
            Groups=[params["SecurityGroupId"]]
        )]
        # Sub() resolves the ${AWS::*} placeholders left in the UserData above.
        ltd.UserData = Base64(Sub(UserData))
        ltd.BlockDeviceMappings = [
            BlockDeviceMapping(
                DeviceName="/dev/xvda" if params["BaseOS"] == "amazonlinux2" else "/dev/sda1",
                Ebs=EBSBlockDevice(
                    VolumeSize=params["RootSize"],
                    VolumeType="gp2",
                    DeleteOnTermination="false" if params["KeepEbs"] is True else "true",
                    Encrypted=True))
        ]
        # Optional scratch volume; io1 with provisioned IOPS when requested.
        if int(params["ScratchSize"]) > 0:
            ltd.BlockDeviceMappings.append(
                BlockDeviceMapping(
                    DeviceName="/dev/xvdbx",
                    Ebs=EBSBlockDevice(
                        VolumeSize=params["ScratchSize"],
                        VolumeType="io1" if int(params["VolumeTypeIops"]) > 0 else "gp2",
                        Iops=params["VolumeTypeIops"] if int(params["VolumeTypeIops"]) > 0 else Ref("AWS::NoValue"),
                        DeleteOnTermination="false" if params["KeepEbs"] is True else "true",
                        Encrypted=True))
            )
        # End LaunchTemplateData

        # Begin Launch Template Resource
        lt = LaunchTemplate("NodeLaunchTemplate")
        lt.LaunchTemplateName = params["ClusterId"] + "-" + str(params["JobId"])
        lt.LaunchTemplateData = ltd
        t.add_resource(lt)
        # End Launch Template Resource

        asg_lt.LaunchTemplateSpecification = LaunchTemplateSpecification(
            LaunchTemplateId=Ref(lt),
            Version=GetAtt(lt, "LatestVersionNumber")
        )

        # One override entry per requested instance type.
        asg_lt.Overrides = []
        for instance in instances_list:
            asg_lt.Overrides.append(LaunchTemplateOverrides(
                InstanceType=instance))

        # Begin InstancesDistribution
        # Mixed on-demand/spot only when a spot allocation is requested AND at
        # least one instance remains on-demand as base capacity.
        if params["SpotPrice"] is not False and \
                params["SpotAllocationCount"] is not False and \
                (params["DesiredCapacity"] - params["SpotAllocationCount"]) > 0:
            mip_usage = True
            idistribution = InstancesDistribution()
            idistribution.OnDemandAllocationStrategy = "prioritized"  # only supported value
            idistribution.OnDemandBaseCapacity = params["DesiredCapacity"] - params["SpotAllocationCount"]
            idistribution.OnDemandPercentageAboveBaseCapacity = "0"  # force the other instances to be SPOT
            idistribution.SpotMaxPrice = Ref("AWS::NoValue") if params["SpotPrice"] == "auto" else str(
                params["SpotPrice"])
            idistribution.SpotAllocationStrategy = params['SpotAllocationStrategy']
            mip.InstancesDistribution = idistribution

        # End MixedPolicyInstance

        # Begin FSx for Lustre
        # Only create a new filesystem when none exists already.
        if params["FSxLustreConfiguration"]["fsx_lustre"] is not False:
            if params["FSxLustreConfiguration"]["existing_fsx"] is False:
                fsx_lustre = FileSystem("FSxForLustre")
                fsx_lustre.FileSystemType = "LUSTRE"
                fsx_lustre.StorageCapacity = params["FSxLustreConfiguration"]["capacity"]
                fsx_lustre.SecurityGroupIds = [params["SecurityGroupId"]]
                fsx_lustre.SubnetIds = params["SubnetId"]

                if params["FSxLustreConfiguration"]["s3_backend"] is not False:
                    fsx_lustre_configuration = LustreConfiguration()
                    # NOTE(review): ExportPath reuses 'import_path' when set,
                    # mirroring ImportPath exactly -- looks like it may have
                    # been meant to use a dedicated export setting; confirm.
                    fsx_lustre_configuration.ImportPath = params["FSxLustreConfiguration"]["import_path"] if params["FSxLustreConfiguration"]["import_path"] is not False else params["FSxLustreConfiguration"]["s3_backend"]
                    fsx_lustre_configuration.ExportPath = params["FSxLustreConfiguration"]["import_path"] if params["FSxLustreConfiguration"]["import_path"] is not False else params["FSxLustreConfiguration"]["s3_backend"] + "/" + params["ClusterId"] + "-fsxoutput/job-" +  params["JobId"] + "/"
                    fsx_lustre.LustreConfiguration = fsx_lustre_configuration

                fsx_lustre.Tags = base_Tags(
                    # False disable PropagateAtLaunch
                    Name=str(params["ClusterId"] + "-compute-job-" + params["JobId"]),
                    _soca_JobId=str(params["JobId"]),
                    _soca_JobName=str(params["JobName"]),
                    _soca_JobQueue=str(params["JobQueue"]),
                    _soca_StackId=stack_name,
                    _soca_JobOwner=str(params["JobOwner"]),
                    _soca_JobProject=str(params["JobProject"]),
                    _soca_KeepForever=str(params["KeepForever"]).lower(),
                    _soca_FSx="true",
                    _soca_ClusterId=str(params["ClusterId"]),
                )
                t.add_resource(fsx_lustre)
        # End FSx For Lustre

        # Begin AutoScalingGroup Resource
        asg = AutoScalingGroup("AutoScalingComputeGroup")
        asg.DependsOn = "NodeLaunchTemplate"
        # Mixed policy when spot distribution is in use or several types given;
        # otherwise attach the launch template directly.
        if mip_usage is True or instances_list.__len__() > 1:
            mip.LaunchTemplate = asg_lt
            asg.MixedInstancesPolicy = mip

        else:
            asg.LaunchTemplate = LaunchTemplateSpecification(
                LaunchTemplateId=Ref(lt),
                Version=GetAtt(lt, "LatestVersionNumber"))

        # Min == Max: the fleet is fixed-size for the lifetime of the job.
        asg.MinSize = int(params["DesiredCapacity"])
        asg.MaxSize = int(params["DesiredCapacity"])
        asg.VPCZoneIdentifier = params["SubnetId"]

        if params["PlacementGroup"] is True:
            pg = PlacementGroup("ComputeNodePlacementGroup")
            pg.Strategy = "cluster"
            t.add_resource(pg)
            asg.PlacementGroup = Ref(pg)

        asg.Tags = Tags(
            Name=str(params["ClusterId"]) + "-compute-job-" + str(params["JobId"]),
            _soca_JobId=str(params["JobId"]),
            _soca_JobName=str(params["JobName"]),
            _soca_JobQueue=str(params["JobQueue"]),
            _soca_StackId=stack_name,
            _soca_JobOwner=str(params["JobOwner"]),
            _soca_JobProject=str(params["JobProject"]),
            _soca_KeepForever=str(params["KeepForever"]).lower(),
            _soca_ClusterId=str(params["ClusterId"]),
            _soca_NodeType="soca-compute-node")
        t.add_resource(asg)
        # End AutoScalingGroup Resource

        # Begin Custom Resource
        # Change Mapping to No if you want to disable this
        if allow_anonymous_data_collection is True:
            metrics = CustomResourceSendAnonymousMetrics("SendAnonymousData")
            metrics.ServiceToken = params["SolutionMetricLambda"]
            metrics.DesiredCapacity = str(params["DesiredCapacity"])
            metrics.InstanceType = str(params["InstanceType"])
            metrics.Efa = str(params["Efa"])
            metrics.ScratchSize = str(params["ScratchSize"])
            metrics.RootSize = str(params["RootSize"])
            metrics.SpotPrice = str(params["SpotPrice"])
            metrics.BaseOS = str(params["BaseOS"])
            metrics.StackUUID = str(params["StackUUID"])
            metrics.KeepForever = str(params["KeepForever"])
            metrics.FsxLustre = str(params["FSxLustreConfiguration"])
            t.add_resource(metrics)
            # End Custom Resource

        if debug is True:
            print(t.to_json())

        # Tags must use "soca:<Key>" syntax
        # Tag keys with ':' are not valid Python kwargs, so '_soca_' is used
        # above and rewritten to 'soca:' in the rendered YAML text here.
        template_output = t.to_yaml().replace("_soca_", "soca:")
        return {'success': True,
                'output': template_output}

    except Exception as e:
        # Best-effort error report: capture the exception type, file and line
        # so the caller can surface it without a traceback.
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        return {'success': False,
                'output': 'cloudformation_builder.py: ' + (
                            str(e) + ': error :' + str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))}
コード例 #31
0
"""
Your module description
"""
from troposphere import Ref, Template
import troposphere.ec2 as ec2

template = Template()

# One EC2 instance per environment; the env name prefixes the logical ID,
# which must be unique within the template.
envs = ['dev', 'test', 'prod']

for x in envs:
    instancename = x + "Ec2"
    ec2_instance = template.add_resource(ec2.Instance(
        instancename,
        ImageId="ami-a7a242da",
        InstanceType="t2.nano",
        ))

# Bug fix: the file was previously opened in append mode ('a') without a
# context manager, so every re-run concatenated another full template into
# template.yaml (producing an invalid YAML document) and the handle was only
# closed explicitly.  Overwrite instead, and let 'with' guarantee the close.
# (writelines() on a plain string also only worked by accident -- it iterates
# characters; write() is the intended call.)
with open("template.yaml", "w") as fh:
    fh.write(template.to_yaml())