},
        "eu-central-1": {
            "AMI": "ami-034fffcc6a0063961"
        },
        "sa-east-1": {
            "AMI": "ami-0112d42866980b373"
        }
    })

# EC2 instance: AMI resolved per-region through the RegionMap mapping,
# key pair taken from the stack parameter, default security group.
region_mapped_instance = ec2.Instance(
    "Ec2Instance",
    ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
    InstanceType="t2.micro",
    KeyName=Ref(keyname_param),
    SecurityGroups=["default"],
    UserData=Base64("80"),
)
ec2_instance = template.add_resource(region_mapped_instance)

template.add_output([
    Output(
        "InstanceId",
        Description="InstanceId of the newly created EC2 instance",
        Value=Ref(ec2_instance),
    ),
    Output(
        "AZ",
        Description="Availability Zone of the newly created EC2 instance",
        Value=GetAtt(ec2_instance, "AvailabilityZone"),
    ),
    Output(
        "PublicIP",
        Description="Public IP address of the newly created EC2 instance",
Example #2
0
            ),
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort=ApplicationPort,
                ToPort=ApplicationPort,
                CidrIp="0.0.0.0/0",
            ),
        ],
    ))

# User data: install git/python3/pip/ansible, run the ansible-pull command
# once at boot, then re-run it every 10 minutes from a cron.d entry.
bootstrap_lines = [
    "#!/bin/bash",
    "sudo yum install --enablerepo=epel -y git",
    "sudo yum -y install python34",
    "sudo wget https://bootstrap.pypa.io/get-pip.py -O /home/ec2-user/get-pip.py",
    "sudo python3 /home/ec2-user/get-pip.py --user",
    "sudo pip install --upgrade pip",
    "sudo /usr/local/bin/pip install --upgrade setuptools",
    "sudo /usr/local/bin/pip install ansible",
    AnsiblePullCmd,
    "sudo echo '*/10 * * * * ec2-user {}' > /etc/cron.d/ansible-pull".format(
        AnsiblePullCmd),
]
ud = Base64(Join('\n', bootstrap_lines))

# IAM role whose trust policy lets the EC2 service assume it.
ec2_trust_statement = Statement(
    Effect=Allow,
    Action=[AssumeRole],
    Principal=Principal("Service", ["ec2.amazonaws.com"]),
)
t.add_resource(
    Role(
        "Role",
        AssumeRolePolicyDocument=Policy(Statement=[ec2_trust_statement]),
    )
)

t.add_resource(
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort=ApplicationPort,
                ToPort=ApplicationPort,
                CidrIp="0.0.0.0/0",
            ),
        ],
    ))

# User data: install node, fetch the helloworld app plus its systemd unit,
# then enable and start the service.
helloworld_setup = [
    "#!/bin/bash",
    "sudo apt update",
    "sudo apt install nodejs",
    "wget http://bit.ly/2vESNuc -O /home/ubuntu/helloworld.js",
    "sudo wget https://raw.githubusercontent.com/Ohizzy21/EffectiveDevOpsTemplates/master/Helloworld.service -O /etc/systemd/system/helloworld.service",
    "sudo systemctl daemon-reload",
    "sudo systemctl enable helloworld",
    "sudo systemctl start helloworld",
    # earlier variants, kept for reference:
    #    "cat helloworld.js",
    #    "wget http://bit.ly/2vVvT18 -O /home/ubuntu/helloworld.conf",
    #    "sudo systemctl start helloworld"
]
ud = Base64(Join('\n', helloworld_setup))

t.add_resource(
    ec2.Instance(
        "instance",
        ImageId="ami-085925f297f89fce1",
        InstanceType="t2.micro",
        SecurityGroups=[Ref("SecurityGroup")],
        KeyName=Ref("KeyPair"),
        UserData=ud,
Example #4
0
 def create_answer(self, command_list, delimiter=""):
     """Join ``command_list`` with ``delimiter`` (Fn::Join), wrap the result
     in Fn::Base64, and return it in plain-dict form.
     """
     joined_commands = Join(delimiter, command_list)
     return Base64(joined_commands).to_dict()
Example #5
0
    def _add_ec2_auto_scaling(self):
        """Add the EC2 auto-scaling tier to self.template.

        Creates, in order: ALB / host / database security groups, a launch
        configuration (with cfn-init metadata and cfn-hup), an auto-scaling
        group spanning two private subnets, and a simple scaling policy.
        Stores sg_alb, sg_hosts, auto_scaling_group and cluster_scaling_policy
        on self for use elsewhere.
        """
        instance_profile = self._add_instance_profile()
        # Security group for the ALB; no ingress rules defined here.
        self.sg_alb = SecurityGroup(
            "SecurityGroupAlb",
            VpcId=Ref(self.vpc),
            GroupDescription=Sub("${AWS::StackName}-alb"))
        self.template.add_resource(self.sg_alb)
        # Host security group: accepts all traffic originating from the ALB SG.
        self.sg_hosts = SecurityGroup(
            "SecurityGroupEc2Hosts",
            SecurityGroupIngress=[{
                'SourceSecurityGroupId': Ref(self.sg_alb),
                'IpProtocol': -1
            }],
            VpcId=Ref(self.vpc),
            GroupDescription=Sub("${AWS::StackName}-hosts"))
        self.template.add_resource(self.sg_hosts)

        # Self-referencing rule so hosts in the group can talk to each other.
        sg_host_ingress = SecurityGroupIngress("SecurityEc2HostsIngress",
                                               SourceSecurityGroupId=Ref(
                                                   self.sg_hosts),
                                               IpProtocol="-1",
                                               GroupId=Ref(self.sg_hosts),
                                               FromPort="-1",
                                               ToPort="-1")
        self.template.add_resource(sg_host_ingress)

        # Databases accept all traffic from the host security group.
        database_security_group = SecurityGroup(
            "SecurityGroupDatabases",
            SecurityGroupIngress=[{
                'SourceSecurityGroupId': Ref(self.sg_hosts),
                'IpProtocol': -1
            }],
            VpcId=Ref(self.vpc),
            GroupDescription=Sub("${AWS::StackName}-databases"))
        self.template.add_resource(database_security_group)
        # Boot script: run cfn-init against LaunchConfiguration metadata,
        # signal the ASG's creation policy, and install/start the SSM agent.
        user_data = Base64(
            Sub('\n'.join([
                "#!/bin/bash", "yum update -y",
                "yum install -y aws-cfn-bootstrap",
                "/opt/aws/bin/cfn-init -v --region ${AWS::Region} --stack ${AWS::StackName} --resource LaunchConfiguration",
                "/opt/aws/bin/cfn-signal -e $? --region ${AWS::Region} --stack ${AWS::StackName} --resource AutoScalingGroup",
                "yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm",
                "systemctl enable amazon-ssm-agent",
                "systemctl start amazon-ssm-agent", ""
            ])))
        # cfn-init metadata: writes cfn-hup config + auto-reloader hook,
        # keeps cfn-hup running, and registers the host with the ECS cluster.
        lc_metadata = cloudformation.Init({
            "config":
            cloudformation.InitConfig(
                files=cloudformation.InitFiles({
                    "/etc/cfn/cfn-hup.conf":
                    cloudformation.InitFile(
                        content=Sub('\n'.join([
                            '[main]', 'stack=${AWS::StackId}',
                            'region=${AWS::Region}', ''
                        ])),
                        mode='256',  # TODO: Why 256
                        owner="root",
                        group="root"),
                    "/etc/cfn/hooks.d/cfn-auto-reloader.conf":
                    cloudformation.InitFile(content=Sub('\n'.join([
                        '[cfn-auto-reloader-hook]', 'triggers=post.update',
                        'path=Resources.ContainerInstances.Metadata.AWS::CloudFormation::Init',
                        'action=/opt/aws/bin/cfn-init -v --region ${AWS::Region} --stack ${AWS::StackName} --resource LaunchConfiguration',
                        ''
                    ])), )
                }),
                services={
                    "sysvinit":
                    cloudformation.InitServices({
                        "cfn-hup":
                        cloudformation.InitService(
                            enabled=True,
                            ensureRunning=True,
                            files=[
                                '/etc/cfn/cfn-hup.conf',
                                '/etc/cfn/hooks.d/cfn-auto-reloader.conf'
                            ])
                    })
                },
                commands={
                    '01_add_instance_to_cluster': {
                        'command':
                        Sub('echo "ECS_CLUSTER=${Cluster}\nECS_RESERVED_MEMORY=256" > /etc/ecs/ecs.config'
                            )
                    }
                })
        })
        launch_configuration = LaunchConfiguration(
            'LaunchConfiguration',
            UserData=user_data,
            IamInstanceProfile=Ref(instance_profile),
            SecurityGroups=[Ref(self.sg_hosts)],
            InstanceType=Ref('InstanceType'),
            ImageId=FindInMap("AWSRegionToAMI", Ref("AWS::Region"), "AMI"),
            Metadata=lc_metadata,
            KeyName=Ref(self.key_pair))
        self.template.add_resource(launch_configuration)
        # , PauseTime='PT15M', WaitOnResourceSignals=True, MaxBatchSize=1, MinInstancesInService=1)
        up = AutoScalingRollingUpdate('AutoScalingRollingUpdate')
        # TODO: clean up
        # NOTE(review): pop() consumes two subnets from a copy of
        # self.private_subnets — assumes at least two are available; confirm.
        subnets = list(self.private_subnets)
        self.auto_scaling_group = AutoScalingGroup(
            "AutoScalingGroup",
            UpdatePolicy=up,
            DesiredCapacity=self.desired_instances,
            Tags=[{
                'PropagateAtLaunch': True,
                'Value': Sub('${AWS::StackName} - ECS Host'),
                'Key': 'Name'
            }],
            MinSize=Ref('MinSize'),
            MaxSize=Ref('MaxSize'),
            VPCZoneIdentifier=[Ref(subnets.pop()),
                               Ref(subnets.pop())],
            LaunchConfigurationName=Ref(launch_configuration),
            # Stack creation waits up to 15 minutes for the cfn-signal above.
            CreationPolicy=CreationPolicy(ResourceSignal=ResourceSignal(
                Timeout='PT15M')))
        self.template.add_resource(self.auto_scaling_group)
        # Simple scaling: add one instance per alarm trigger, 5 min cooldown.
        self.cluster_scaling_policy = ScalingPolicy(
            'AutoScalingPolicy',
            AdjustmentType='ChangeInCapacity',
            AutoScalingGroupName=Ref(self.auto_scaling_group),
            Cooldown=300,
            PolicyType='SimpleScaling',
            ScalingAdjustment=1)
        self.template.add_resource(self.cluster_scaling_policy)
))

# Instance profile that attaches the ECS cluster role to launched instances.
ecs_instance_profile = InstanceProfile(
    'EC2InstanceProfile',
    Roles=[Ref('EcsClusterRole')],
)
t.add_resource(ecs_instance_profile)

# ECS container-instance launch configuration: the boot script registers the
# host with the ECS cluster via /etc/ecs/ecs.config, then reports back to the
# auto-scaling group through cfn-signal.
container_boot_script = Base64(Join('', [
    "#!/bin/bash -xe\n",
    "echo ECS_CLUSTER=",
    Ref('ECSCluster'),
    " >> /etc/ecs/ecs.config\n",
    "yum install -y aws-cfn-bootstrap\n",
    "/opt/aws/bin/cfn-signal -e $? ",
    " --stack ",
    Ref('AWS::StackName'),
    " --resource ECSAutoScalingGroup ",
    " --region ",
    Ref('AWS::Region'),
    "\n",
]))
t.add_resource(LaunchConfiguration(
    'ContainerInstances',
    ImageId='ami-045f1b3f87ed83659',
    InstanceType='t2.micro',
    KeyName=Ref("KeyPair"),
    SecurityGroups=[Ref("SecurityGroup")],
    IamInstanceProfile=Ref('EC2InstanceProfile'),
    AssociatePublicIpAddress='true',
    UserData=container_boot_script,
))

t.add_resource(AutoScalingGroup(
Example #7
0
            FromPort="22",
            ToPort="22",
            CidrIp=PublicIpCidr,
        ),
        ec2.SecurityGroupRule(
            IpProtocol="tcp",
            FromPort=ApplicationPort,
            ToPort=ApplicationPort,
            CidrIp="0.0.0.0/0",
        ),
    ],
))

# User data: install git and ansible, run the ansible-pull command once at
# boot, then schedule it every 10 minutes via /etc/cron.d.
# Fix: the original wrote to "/etc/cron.d/ansible- pull"; the stray space made
# the shell redirect to "/etc/cron.d/ansible-" and pass "pull" as an extra
# echo argument, so the intended cron file was never created.
# NOTE(review): /etc/cron.d entries normally require a user field (e.g.
# "*/10 * * * * root <cmd>") — confirm against the target distro's crontab(5).
ud = Base64(Join('\n', [
    "#!/bin/bash",
    "yum install --enablerepo=epel -y git",
    "pip install ansible",
    AnsiblePullCmd,
    "echo '*/10 * * * * {}' > /etc/cron.d/ansible-pull".format(AnsiblePullCmd),
]))

# Single EC2 instance wired to the stack's security group, key pair, and the
# user-data script built above.
managed_instance = ec2.Instance(
    "instance",
    ImageId="ami-cfe4b2b0",
    InstanceType="t2.micro",
    SecurityGroups=[Ref("SecurityGroup")],
    KeyName=Ref("KeyPair"),
    UserData=ud,
)
t.add_resource(managed_instance)

t.add_output(Output(
    "InstancePublicIp",
    Description="Public IP of our instance.",
Example #8
0
))

# User data: install Oracle JDK 8u161 from an internal Artifactory, then
# Jenkins, Maven, Ansible, Docker and the DC/OS CLI on first boot.
# NOTE(review): the Artifactory credentials are hard-coded in the script
# below ("-u admin:password") — they end up in plain text in user data.
jenkins_master_setup = [
    "#!/bin/bash\n",
    "yum -y update\n",
    "curl http://10.0.1.20:8081/artifactory/thirdparty/jdk-8u161-linux-x64.rpm -u admin:password --output jdk-8u161-linux-x64.rpm\n",
    "yum -y localinstall jdk-8u161-linux-x64.rpm\n",
    # NOTE(review): this registers /usr/java/jdk1.8.0_11/... although the RPM
    # above installs 8u161 — looks like a typo for jdk1.8.0_161; confirm.
    "/usr/sbin/alternatives --install /usr/bin/java java /usr/java/jdk1.8.0_11/bin/java 20000\n",
    "/usr/sbin/alternatives --set java /usr/java/jdk1.8.0_161/jre/bin/java\n",
    "curl --output /etc/yum.repos.d/jenkins.repo https://pkg.jenkins.io/redhat/jenkins.repo\n",
    "rpm --import https://pkg.jenkins.io/redhat/jenkins.io.key\n",
    "yum -y install jenkins python-simplejson\n",
    "service jenkins stop\n",
    "yum -y remove java-1.7.0-openjdk.x86_64\n",
    "curl https://repos.fedorapeople.org/repos/dchen/apache-maven/epel-apache-maven.repo --output /etc/yum.repos.d/epel-apache-maven.repo\n",
    "yum install -y apache-maven\n",
    "service jenkins start\n",
    "chkconfig jenkins on\n",
    "yum -y install ansible\n",
    "yum -y install git python-lxml\n",
    "pip install lxml\n",
    "chmod 0600 /var/lib/jenkins/.ssh/id_rsa*\n",
    "yum -y install docker\n",
    "service docker start\n",
    "chkconfig docker on\n",
    "usermod -a -G root jenkins\n",
    "curl --output /usr/local/bin/dcos https://downloads.dcos.io/binaries/cli/linux/x86-64/dcos-1.8/dcos\n",
    "chmod +x /usr/local/bin/dcos\n",
    "dcos config set core.dcos_url http://10.10.0.6\n",
]
ud = Base64(Join('', jenkins_master_setup))

t.add_resource(Role(
Example #9
0
def create_instance():
    """Build the 'devserver' EC2 instance resource.

    Returns an ec2.Instance with a 100 GB gp2 root volume, AMI / keypair /
    security group / subnet all resolved from template parameters via Ref,
    and a first-boot user-data script that sets the hostname and installs
    git, Docker and Jenkins.
    """
    return ec2.Instance(
        'devserver',
        # Root volume: 100 GB gp2, removed when the instance terminates.
        BlockDeviceMappings=[
            ec2.BlockDeviceMapping(
                DeviceName='/dev/xvda',
                Ebs=ec2.EBSBlockDevice(
                    VolumeSize=100,
                    VolumeType='gp2',
                    DeleteOnTermination=True,
                ),
            ),
        ],
        ImageId=Ref('amiId'),
        InstanceType='t2.medium',
        KeyName=Ref('keypair'),
        SecurityGroupIds=[Ref('secgrpDevServer')],
        SubnetId=Ref('subnetA'),
        Tags=_tags(),
        # Raw string (r'''): keeps the shell's $-vars and redirects intact.
        # dedent strips the common indent; lstrip drops the leading newline.
        UserData=Base64(
            textwrap.dedent(r'''
            #!/bin/bash -ex

            exec > >(tee ~/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1
            echo BEGIN
            date '+%Y-%m-%d %H:%M:%S'

            # --- System Config
            hostnamectl set-hostname gwa-dev
            yum install -y git jq tree vim

            # --- UX
            cat <<-EOT > /etc/profile.d/ux.sh
                alias vi='vim'
                alias tree='tree -C'
            EOT
            cat <<-EOT >> /etc/vimrc
                set autoindent
                set modeline
                set tabstop=4
                set listchars=tab:——
            EOT

            # --- Docker
            yum install -y docker
            systemctl enable docker
            systemctl start docker

            usermod -aG docker ec2-user

            docker network create geowave-admin

            # --- Jenkins
            sudo wget -O /etc/yum.repos.d/jenkins.repo https://pkg.jenkins.io/redhat-stable/jenkins.repo
            sudo rpm --import https://pkg.jenkins.io/redhat-stable/jenkins.io.key
            yum install -y jenkins java-1.8.0-openjdk

            usermod -aG docker jenkins

            systemctl enable jenkins
            systemctl start jenkins

            echo END
            date '+%Y-%m-%d %H:%M:%S'
        ''').lstrip()),
    )
Example #10
0
def main(**params):
    """Build the SOCA compute-node CloudFormation template from job params.

    Assembles a launch template (user data, EBS, spot/CPU options), an
    auto-scaling group (optionally with a mixed-instances policy), optional
    FSx for Lustre and placement group resources, and an optional anonymous
    metrics custom resource. Returns {'success': bool, 'output': str} where
    output is the rendered YAML on success or an error string on failure.
    """
    try:
        # Metadata
        t = Template()
        t.set_version("2010-09-09")
        t.set_description("(SOCA) - Base template to deploy compute nodes.")
        allow_anonymous_data_collection = params["MetricCollectionAnonymous"]
        debug = False
        mip_usage = False
        # "+"-separated list, e.g. "c5.large+c5.xlarge".
        instances_list = params["InstanceType"].split("+")
        asg_lt = asg_LaunchTemplate()
        ltd = LaunchTemplateData("NodeLaunchTemplateData")
        mip = MixedInstancesPolicy()
        stack_name = Ref("AWS::StackName")

        # Begin LaunchTemplateData
        # NOTE(review): the script below is built by raw string concatenation
        # of params values (no shell escaping) and is later passed through
        # Sub(), which resolves the ${AWS::...} placeholders it contains.
        UserData = '''#!/bin/bash -xe
export PATH=$PATH:/usr/local/bin
if [[ "''' + params['BaseOS'] + '''" == "centos7" ]] || [[ "''' + params['BaseOS'] + '''" == "rhel7" ]];
    then
        EASY_INSTALL=$(which easy_install-2.7)
        $EASY_INSTALL pip
        PIP=$(which pip2.7)
        $PIP install awscli
        yum install -y nfs-utils # enforce install of nfs-utils
else
     # Upgrade awscli on ALI (do not use yum)
     EASY_INSTALL=$(which easy_install-2.7)
     $EASY_INSTALL pip
     PIP=$(which pip)
     $PIP install awscli --upgrade 
fi
if [[ "''' + params['BaseOS'] + '''" == "amazonlinux2" ]];
    then
        /usr/sbin/update-motd --disable
fi

GET_INSTANCE_TYPE=$(curl http://169.254.169.254/latest/meta-data/instance-type)
echo export "SOCA_CONFIGURATION="''' + str(params['ClusterId']) + '''"" >> /etc/environment
echo export "SOCA_BASE_OS="''' + str(params['BaseOS']) + '''"" >> /etc/environment
echo export "SOCA_JOB_QUEUE="''' + str(params['JobQueue']) + '''"" >> /etc/environment
echo export "SOCA_JOB_OWNER="''' + str(params['JobOwner']) + '''"" >> /etc/environment
echo export "SOCA_JOB_NAME="''' + str(params['JobName']) + '''"" >> /etc/environment
echo export "SOCA_JOB_PROJECT="''' + str(params['JobProject']) + '''"" >> /etc/environment
echo export "SOCA_VERSION="''' + str(params['Version']) + '''"" >> /etc/environment
echo export "SOCA_JOB_EFA="''' + str(params['Efa']).lower() + '''"" >> /etc/environment
echo export "SOCA_JOB_ID="''' + str(params['JobId']) + '''"" >> /etc/environment
echo export "SOCA_SCRATCH_SIZE=''' + str(params['ScratchSize']) + '''" >> /etc/environment
echo export "SOCA_INSTALL_BUCKET="''' + str(params['S3Bucket']) + '''"" >> /etc/environment
echo export "SOCA_INSTALL_BUCKET_FOLDER="''' + str(params['S3InstallFolder']) + '''"" >> /etc/environment
echo export "SOCA_FSX_LUSTRE_BUCKET="''' + str(params['FSxLustreConfiguration']['fsx_lustre']).lower() + '''"" >> /etc/environment
echo export "SOCA_FSX_LUSTRE_DNS="''' + str(params['FSxLustreConfiguration']['existing_fsx']).lower() + '''"" >> /etc/environment
echo export "SOCA_INSTANCE_TYPE=$GET_INSTANCE_TYPE" >> /etc/environment
echo export "SOCA_INSTANCE_HYPERTHREADING="''' + str(params['ThreadsPerCore']).lower() + '''"" >> /etc/environment
echo export "SOCA_HOST_SYSTEM_LOG="/apps/soca/''' + str(params['ClusterId']) + '''/cluster_node_bootstrap/logs/''' + str(params['JobId']) + '''/$(hostname -s)"" >> /etc/environment
echo export "AWS_STACK_ID=${AWS::StackName}" >> /etc/environment
echo export "AWS_DEFAULT_REGION=${AWS::Region}" >> /etc/environment


source /etc/environment
AWS=$(which aws)

# Give yum permission to the user on this specific machine
echo "''' + params['JobOwner'] + ''' ALL=(ALL) /bin/yum" >> /etc/sudoers

mkdir -p /apps
mkdir -p /data

# Mount EFS
echo "''' + params['EFSDataDns'] + ''':/ /data nfs4 nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2 0 0" >> /etc/fstab
echo "''' + params['EFSAppsDns'] + ''':/ /apps nfs4 nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2 0 0" >> /etc/fstab
mount -a 

# Configure NTP
yum remove -y ntp
yum install -y chrony
mv /etc/chrony.conf  /etc/chrony.conf.original
echo -e """
# use the local instance NTP service, if available
server 169.254.169.123 prefer iburst minpoll 4 maxpoll 4

# Use public servers from the pool.ntp.org project.
# Please consider joining the pool (http://www.pool.ntp.org/join.html).
# !!! [BEGIN] SOCA REQUIREMENT
# You will need to open UDP egress traffic on your security group if you want to enable public pool
#pool 2.amazon.pool.ntp.org iburst
# !!! [END] SOCA REQUIREMENT
# Record the rate at which the system clock gains/losses time.
driftfile /var/lib/chrony/drift

# Allow the system clock to be stepped in the first three updates
# if its offset is larger than 1 second.
makestep 1.0 3

# Specify file containing keys for NTP authentication.
keyfile /etc/chrony.keys

# Specify directory for log files.
logdir /var/log/chrony

# save data between restarts for fast re-load
dumponexit
dumpdir /var/run/chrony
""" > /etc/chrony.conf
systemctl enable chronyd

# Prepare  Log folder
mkdir -p $SOCA_HOST_SYSTEM_LOG
echo "@reboot /bin/bash /apps/soca/$SOCA_CONFIGURATION/cluster_node_bootstrap/ComputeNodePostReboot.sh >> $SOCA_HOST_SYSTEM_LOG/ComputeNodePostInstall.log 2>&1" | crontab -
$AWS s3 cp s3://$SOCA_INSTALL_BUCKET/$SOCA_INSTALL_BUCKET_FOLDER/scripts/config.cfg /root/
/bin/bash /apps/soca/$SOCA_CONFIGURATION/cluster_node_bootstrap/ComputeNode.sh ''' + params['SchedulerHostname'] + ''' >> $SOCA_HOST_SYSTEM_LOG/ComputeNode.sh.log 2>&1'''

        # EBS-optimized unless any t2.* instance is requested; CpuOptions only
        # for non-t2, non-metal types.
        ltd.EbsOptimized = True
        for instance in instances_list:
            if "t2." in instance:
                ltd.EbsOptimized = False
            else:
                # metal + t2 does not support CpuOptions
                if "metal" not in instance:
                    ltd.CpuOptions = CpuOptions(
                        CoreCount=int(params["CoreCount"]),
                        ThreadsPerCore=1 if params["ThreadsPerCore"] is False else 2)

        ltd.IamInstanceProfile = IamInstanceProfile(Arn=params["ComputeNodeInstanceProfileArn"])
        ltd.KeyName = params["SSHKeyPair"]
        ltd.ImageId = params["ImageId"]
        # Plain spot request only when no spot-allocation split is configured
        # (that case is handled by the mixed-instances policy below).
        if params["SpotPrice"] is not False and params["SpotAllocationCount"] is False:
            ltd.InstanceMarketOptions = InstanceMarketOptions(
                MarketType="spot",
                SpotOptions=SpotOptions(
                    MaxPrice=Ref("AWS::NoValue") if params["SpotPrice"] == "auto" else str(params["SpotPrice"])
                    # auto -> cap at OD price
                )
            )
        ltd.InstanceType = instances_list[0]
        ltd.NetworkInterfaces = [NetworkInterfaces(
            InterfaceType="efa" if params["Efa"] is not False else Ref("AWS::NoValue"),
            DeleteOnTermination=True,
            DeviceIndex=0,
            Groups=[params["SecurityGroupId"]]
        )]
        # Sub() resolves ${AWS::StackName}/${AWS::Region} inside the script.
        ltd.UserData = Base64(Sub(UserData))
        ltd.BlockDeviceMappings = [
            BlockDeviceMapping(
                DeviceName="/dev/xvda" if params["BaseOS"] == "amazonlinux2" else "/dev/sda1",
                Ebs=EBSBlockDevice(
                    VolumeSize=params["RootSize"],
                    VolumeType="gp2",
                    DeleteOnTermination="false" if params["KeepEbs"] is True else "true",
                    Encrypted=True))
        ]
        # Optional scratch volume; io1 with provisioned IOPS when requested.
        if int(params["ScratchSize"]) > 0:
            ltd.BlockDeviceMappings.append(
                BlockDeviceMapping(
                    DeviceName="/dev/xvdbx",
                    Ebs=EBSBlockDevice(
                        VolumeSize=params["ScratchSize"],
                        VolumeType="io1" if int(params["VolumeTypeIops"]) > 0 else "gp2",
                        Iops=params["VolumeTypeIops"] if int(params["VolumeTypeIops"]) > 0 else Ref("AWS::NoValue"),
                        DeleteOnTermination="false" if params["KeepEbs"] is True else "true",
                        Encrypted=True))
            )
        # End LaunchTemplateData

        # Begin Launch Template Resource
        lt = LaunchTemplate("NodeLaunchTemplate")
        lt.LaunchTemplateName = params["ClusterId"] + "-" + str(params["JobId"])
        lt.LaunchTemplateData = ltd
        t.add_resource(lt)
        # End Launch Template Resource

        asg_lt.LaunchTemplateSpecification = LaunchTemplateSpecification(
            LaunchTemplateId=Ref(lt),
            Version=GetAtt(lt, "LatestVersionNumber")
        )

        # One override per requested instance type for the mixed policy.
        asg_lt.Overrides = []
        for instance in instances_list:
            asg_lt.Overrides.append(LaunchTemplateOverrides(
                InstanceType=instance))

        # Begin InstancesDistribution
        # Mixed on-demand/spot split, used only when both a spot price and a
        # spot allocation count are set and some on-demand capacity remains.
        if params["SpotPrice"] is not False and \
                params["SpotAllocationCount"] is not False and \
                (params["DesiredCapacity"] - params["SpotAllocationCount"]) > 0:
            mip_usage = True
            idistribution = InstancesDistribution()
            idistribution.OnDemandAllocationStrategy = "prioritized"  # only supported value
            idistribution.OnDemandBaseCapacity = params["DesiredCapacity"] - params["SpotAllocationCount"]
            idistribution.OnDemandPercentageAboveBaseCapacity = "0"  # force the other instances to be SPOT
            idistribution.SpotMaxPrice = Ref("AWS::NoValue") if params["SpotPrice"] == "auto" else str(
                params["SpotPrice"])
            idistribution.SpotAllocationStrategy = params['SpotAllocationStrategy']
            mip.InstancesDistribution = idistribution

        # End MixedPolicyInstance

        # Begin FSx for Lustre
        # Only create a new filesystem when one isn't supplied via existing_fsx.
        if params["FSxLustreConfiguration"]["fsx_lustre"] is not False:
            if params["FSxLustreConfiguration"]["existing_fsx"] is False:
                fsx_lustre = FileSystem("FSxForLustre")
                fsx_lustre.FileSystemType = "LUSTRE"
                fsx_lustre.StorageCapacity = params["FSxLustreConfiguration"]["capacity"]
                fsx_lustre.SecurityGroupIds = [params["SecurityGroupId"]]
                fsx_lustre.SubnetIds = params["SubnetId"]

                if params["FSxLustreConfiguration"]["s3_backend"] is not False:
                    fsx_lustre_configuration = LustreConfiguration()
                    fsx_lustre_configuration.ImportPath = params["FSxLustreConfiguration"]["import_path"] if params["FSxLustreConfiguration"]["import_path"] is not False else params["FSxLustreConfiguration"]["s3_backend"]
                    fsx_lustre_configuration.ExportPath = params["FSxLustreConfiguration"]["import_path"] if params["FSxLustreConfiguration"]["import_path"] is not False else params["FSxLustreConfiguration"]["s3_backend"] + "/" + params["ClusterId"] + "-fsxoutput/job-" +  params["JobId"] + "/"
                    fsx_lustre.LustreConfiguration = fsx_lustre_configuration

                fsx_lustre.Tags = base_Tags(
                    # False disable PropagateAtLaunch
                    Name=str(params["ClusterId"] + "-compute-job-" + params["JobId"]),
                    _soca_JobId=str(params["JobId"]),
                    _soca_JobName=str(params["JobName"]),
                    _soca_JobQueue=str(params["JobQueue"]),
                    _soca_StackId=stack_name,
                    _soca_JobOwner=str(params["JobOwner"]),
                    _soca_JobProject=str(params["JobProject"]),
                    _soca_KeepForever=str(params["KeepForever"]).lower(),
                    _soca_FSx="true",
                    _soca_ClusterId=str(params["ClusterId"]),
                )
                t.add_resource(fsx_lustre)
        # End FSx For Lustre

        # Begin AutoScalingGroup Resource
        asg = AutoScalingGroup("AutoScalingComputeGroup")
        asg.DependsOn = "NodeLaunchTemplate"
        # Mixed policy when spot split or several instance types; otherwise
        # pin the ASG directly to the launch template.
        if mip_usage is True or instances_list.__len__() > 1:
            mip.LaunchTemplate = asg_lt
            asg.MixedInstancesPolicy = mip

        else:
            asg.LaunchTemplate = LaunchTemplateSpecification(
                LaunchTemplateId=Ref(lt),
                Version=GetAtt(lt, "LatestVersionNumber"))

        # Fixed-size group: min == max == desired capacity.
        asg.MinSize = int(params["DesiredCapacity"])
        asg.MaxSize = int(params["DesiredCapacity"])
        asg.VPCZoneIdentifier = params["SubnetId"]

        if params["PlacementGroup"] is True:
            pg = PlacementGroup("ComputeNodePlacementGroup")
            pg.Strategy = "cluster"
            t.add_resource(pg)
            asg.PlacementGroup = Ref(pg)

        asg.Tags = Tags(
            Name=str(params["ClusterId"]) + "-compute-job-" + str(params["JobId"]),
            _soca_JobId=str(params["JobId"]),
            _soca_JobName=str(params["JobName"]),
            _soca_JobQueue=str(params["JobQueue"]),
            _soca_StackId=stack_name,
            _soca_JobOwner=str(params["JobOwner"]),
            _soca_JobProject=str(params["JobProject"]),
            _soca_KeepForever=str(params["KeepForever"]).lower(),
            _soca_ClusterId=str(params["ClusterId"]),
            _soca_NodeType="soca-compute-node")
        t.add_resource(asg)
        # End AutoScalingGroup Resource

        # Begin Custom Resource
        # Change Mapping to No if you want to disable this
        if allow_anonymous_data_collection is True:
            metrics = CustomResourceSendAnonymousMetrics("SendAnonymousData")
            metrics.ServiceToken = params["SolutionMetricLambda"]
            metrics.DesiredCapacity = str(params["DesiredCapacity"])
            metrics.InstanceType = str(params["InstanceType"])
            metrics.Efa = str(params["Efa"])
            metrics.ScratchSize = str(params["ScratchSize"])
            metrics.RootSize = str(params["RootSize"])
            metrics.SpotPrice = str(params["SpotPrice"])
            metrics.BaseOS = str(params["BaseOS"])
            metrics.StackUUID = str(params["StackUUID"])
            metrics.KeepForever = str(params["KeepForever"])
            metrics.FsxLustre = str(params["FSxLustreConfiguration"])
            t.add_resource(metrics)
            # End Custom Resource

        if debug is True:
            print(t.to_json())

        # Tags must use "soca:<Key>" syntax
        template_output = t.to_yaml().replace("_soca_", "soca:")
        return {'success': True,
                'output': template_output}

    except Exception as e:
        # Return a diagnostic string (exception, file, line) instead of raising.
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        return {'success': False,
                'output': 'cloudformation_builder.py: ' + (
                            str(e) + ': error :' + str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))}
Example #11
0
    def create_template(self):
        """Create template (main function called by Stacker)."""
        template = self.template
        variables = self.get_variables()
        template.add_version('2010-09-09')
        template.add_description('Kubernetes workers via EKS - V1.0.0 '
                                 '- compatible with amazon-eks-node-v23+')

        # Metadata
        template.add_metadata({
            'AWS::CloudFormation::Interface': {
                'ParameterGroups': [
                    {'Label': {'default': 'EKS Cluster'},
                     'Parameters': [variables[i].name
                                    for i
                                    in ['ClusterName',
                                        'ClusterControlPlaneSecurityGroup']]},
                    {'Label': {'default': 'Worker Node Configuration'},
                     'Parameters': [variables[i].name
                                    for i
                                    in ['NodeGroupName',
                                        'NodeAutoScalingGroupMinSize',
                                        'NodeAutoScalingGroupMaxSize',
                                        'UseDesiredInstanceCount',
                                        'NodeInstanceType',
                                        'NodeInstanceProfile',
                                        'NodeImageId',
                                        'NodeVolumeSize',
                                        'KeyName',
                                        'UseSpotInstances',
                                        'SpotBidPrice',
                                        'BootstrapArguments']]},
                    {'Label': {'default': 'Worker Network Configuration'},
                     'Parameters': [variables[i].name
                                    for i
                                    in ['VpcId', 'Subnets']]}
                ]
            }
        })

        # Conditions
        template.add_condition(
            'SetSpotPrice',
            Equals(variables['UseSpotInstances'].ref, 'yes')
        )
        template.add_condition(
            'DesiredInstanceCountSpecified',
            Equals(variables['UseDesiredInstanceCount'].ref, 'true')
        )
        template.add_condition(
            'KeyNameSpecified',
            Not(Equals(variables['KeyName'].ref, ''))
        )

        # Resources
        nodesecuritygroup = template.add_resource(
            ec2.SecurityGroup(
                'NodeSecurityGroup',
                GroupDescription='Security group for all nodes in the cluster',
                Tags=[
                    {'Key': Sub('kubernetes.io/cluster/${ClusterName}'),
                     'Value': 'owned'},
                ],
                VpcId=variables['VpcId'].ref
            )
        )
        template.add_output(
            Output(
                'NodeSecurityGroup',
                Description='Security group for all nodes in the cluster',
                Value=nodesecuritygroup.ref()
            )
        )
        template.add_resource(
            ec2.SecurityGroupIngress(
                'NodeSecurityGroupIngress',
                Description='Allow node to communicate with each other',
                GroupId=nodesecuritygroup.ref(),
                SourceSecurityGroupId=nodesecuritygroup.ref(),
                IpProtocol='-1',
                FromPort=0,
                ToPort=65535
            )
        )
        template.add_resource(
            ec2.SecurityGroupIngress(
                'NodeSecurityGroupFromControlPlaneIngress',
                Description='Allow worker Kubelets and pods to receive '
                            'communication from the cluster control plane',
                GroupId=nodesecuritygroup.ref(),
                SourceSecurityGroupId=variables['ClusterControlPlaneSecurityGroup'].ref,  # noqa
                IpProtocol='tcp',
                FromPort=1025,
                ToPort=65535
            )
        )
        template.add_resource(
            ec2.SecurityGroupEgress(
                'ControlPlaneEgressToNodeSecurityGroup',
                Description='Allow the cluster control plane to communicate '
                            'with worker Kubelet and pods',
                GroupId=variables['ClusterControlPlaneSecurityGroup'].ref,
                DestinationSecurityGroupId=nodesecuritygroup.ref(),
                IpProtocol='tcp',
                FromPort=1025,
                ToPort=65535
            )
        )
        template.add_resource(
            ec2.SecurityGroupIngress(
                'NodeSecurityGroupFromControlPlaneOn443Ingress',
                Description='Allow pods running extension API servers on port '
                            '443 to receive communication from cluster '
                            'control plane',
                GroupId=nodesecuritygroup.ref(),
                SourceSecurityGroupId=variables['ClusterControlPlaneSecurityGroup'].ref,  # noqa
                IpProtocol='tcp',
                FromPort=443,
                ToPort=443
            )
        )
        template.add_resource(
            ec2.SecurityGroupEgress(
                'ControlPlaneEgressToNodeSecurityGroupOn443',
                Description='Allow the cluster control plane to communicate '
                            'with pods running extension API servers on port '
                            '443',
                GroupId=variables['ClusterControlPlaneSecurityGroup'].ref,
                DestinationSecurityGroupId=nodesecuritygroup.ref(),
                IpProtocol='tcp',
                FromPort=443,
                ToPort=443
            )
        )
        template.add_resource(
            ec2.SecurityGroupIngress(
                'ClusterControlPlaneSecurityGroupIngress',
                Description='Allow pods to communicate with the cluster API '
                            'Server',
                GroupId=variables['ClusterControlPlaneSecurityGroup'].ref,
                SourceSecurityGroupId=nodesecuritygroup.ref(),
                IpProtocol='tcp',
                FromPort=443,
                ToPort=443
            )
        )

        nodelaunchconfig = template.add_resource(
            autoscaling.LaunchConfiguration(
                'NodeLaunchConfig',
                AssociatePublicIpAddress=True,
                IamInstanceProfile=variables['NodeInstanceProfile'].ref,
                ImageId=variables['NodeImageId'].ref,
                InstanceType=variables['NodeInstanceType'].ref,
                KeyName=If(
                    'KeyNameSpecified',
                    variables['KeyName'].ref,
                    NoValue
                ),
                SecurityGroups=[nodesecuritygroup.ref()],
                SpotPrice=If('SetSpotPrice',
                             variables['SpotBidPrice'].ref,
                             NoValue),
                BlockDeviceMappings=[autoscaling.BlockDeviceMapping(
                    DeviceName='/dev/xvda',
                    Ebs=autoscaling.EBSBlockDevice(
                        VolumeSize=variables['NodeVolumeSize'].ref,
                        VolumeType='gp2',
                        DeleteOnTermination=True
                    )
                )],
                UserData=Base64(
                    Sub('\n'.join([
                        '#!/bin/bash',
                        'set -o xtrace',
                        '/etc/eks/bootstrap.sh ${ClusterName} ${BootstrapArguments}',  # noqa
                        '/opt/aws/bin/cfn-signal --exit-code $? \\',
                        '--stack ${AWS::StackName} \\',
                        '--resource NodeGroup \\',
                        '--region ${AWS::Region}'
                    ]))
                )
            )
        )

        template.add_resource(
            autoscaling.AutoScalingGroup(
                'NodeGroup',
                DesiredCapacity=If(
                    'DesiredInstanceCountSpecified',
                    variables['NodeAutoScalingGroupMaxSize'].ref,
                    NoValue
                ),
                LaunchConfigurationName=nodelaunchconfig.ref(),
                MinSize=variables['NodeAutoScalingGroupMinSize'].ref,
                MaxSize=variables['NodeAutoScalingGroupMaxSize'].ref,
                VPCZoneIdentifier=variables['Subnets'].ref,
                Tags=[
                    autoscaling.Tag(
                        'Name',
                        Sub('${ClusterName}-${NodeGroupName}-Node'),
                        True),
                    autoscaling.Tag(
                        Sub('kubernetes.io/cluster/${ClusterName}'),
                        'owned',
                        True)
                ],
                UpdatePolicy=UpdatePolicy(
                    AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                        MinInstancesInService='1',
                        MaxBatchSize='1'
                    )
                )
            )
        )
                FromPort="22",
                ToPort="22",
                CidrIp=PublicCidrIP,
            ),
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort=ApplicationPort,
                ToPort=ApplicationPort,
                CidrIp="0.0.0.0/0",
            ),
        ],
    ))

# Instance user data: install git+ansible, run the first ansible-pull
# immediately, then schedule it every 10 minutes.
ud = Base64(
    Join('\n', [
        "#!/bin/bash", "sudo apt update && sudo apt install -y git ansible",
        AnsiblePullCmd,
        # Files under /etc/cron.d use the system crontab format, which
        # REQUIRES a user field; without it cron rejects the entry and the
        # scheduled pull never runs.
        "echo '*/10 * * * * root {}' > /etc/cron.d/ansiblepull".
        format(AnsiblePullCmd)
    ]))

# IAM role with a trust policy that lets EC2 instances assume it.
ec2_trust_policy = Policy(
    Statement=[
        Statement(
            Effect=Allow,
            Action=[AssumeRole],
            Principal=Principal("Service", ["ec2.amazonaws.com"]),
        )
    ])

t.add_resource(Role("Role", AssumeRolePolicyDocument=ec2_trust_policy))

# Instance profile wrapping the role so EC2 instances can use it.
ec2_instance_profile = InstanceProfile(
    "InstanceProfile",
    Path="/",
    Roles=[Ref("Role")],
)
t.add_resource(ec2_instance_profile)

t.add_resource(
Example #13
0
    def add_resources(self):
        """Build the Cassandra tier.

        Creates: an internet-facing SSH load balancer and its security
        group, the node security group (with a separate self-referencing
        ingress for gossip), a fixed ENI for the seed node, the seed EC2
        instance, and an auto-scaled group of non-seed nodes behind the LB.

        Fix: seed and non-seed nodes previously used different cluster
        names ('devops_cluster' vs 'devoops_cluster'); Cassandra refuses to
        join a ring whose cluster_name differs, so non-seed nodes could
        never join the seed. Both now share one user-data script.
        """
        # Load balancer SG: SSH (22) only from the admin CIDR.
        self.CassandraPublicLBSG = self.template.add_resource(
            ec2.SecurityGroup(
                "CassandraPublicLBSG",
                GroupDescription=
                "Loadbalancer Security Group For Cassandra Public LB",
                VpcId=Ref(self.VpcId),
                SecurityGroupIngress=[
                    ec2.SecurityGroupRule(
                        IpProtocol="tcp",
                        FromPort=22,
                        ToPort=22,
                        CidrIp=Ref(self.AdminCidrBlock),
                    ),
                ],
                Tags=self.base_tags +
                Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                     "-CassandraPublicLBSG"),
            ))

        # Node SG: SSH only from the LB SG. The gossip ports are added via
        # a separate ingress resource below because a group cannot
        # reference itself inline at creation time.
        self.CassandraSG = self.template.add_resource(
            ec2.SecurityGroup(
                "CassandraSG",
                GroupDescription=
                "Allow communication between Cassandra Seed and Non-Seed Nodes",
                VpcId=Ref(self.VpcId),
                SecurityGroupIngress=[
                    ec2.SecurityGroupRule(
                        IpProtocol="tcp",
                        FromPort=22,
                        ToPort=22,
                        SourceSecurityGroupId=Ref(self.CassandraPublicLBSG),
                    ),
                ],
                Tags=self.base_tags +
                Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                     "-CassandraEc2SG"),
            ))

        # Inter-node storage/gossip ports (7000-7001) between members.
        self.CassandraSGInterNodeCommunicationIngress = self.template.add_resource(
            ec2.SecurityGroupIngress(
                "CassandraSGInterNodeCommunicationIngress",
                DependsOn=self.CassandraSG,
                GroupId=Ref(self.CassandraSG),
                IpProtocol="tcp",
                FromPort=7000,
                ToPort=7001,
                SourceSecurityGroupId=Ref(self.CassandraSG),
            ))

        # Fixed ENI so the seed keeps the 10.0.1.132 address that every
        # node's SEED_LIST points at.
        self.CassandraSeedNetworkInterface = self.template.add_resource(
            ec2.NetworkInterface(
                "Eth0",
                Description="eth0",
                GroupSet=[Ref(self.CassandraSG)],
                SubnetId=Ref(self.RESTPrivSubnet1),
                PrivateIpAddress="10.0.1.132",
                Tags=self.base_tags +
                Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                     "-CassandraSeedNetworkInterface"),
            ))

        # Bootstrap script shared by seed and non-seed nodes: rewrites
        # cassandra.yaml (cluster name, seed list, addresses, snitch,
        # password auth) and starts the service. CLUSTER_NAME must be
        # identical on every node for the ring to form.
        cassandra_user_data = Base64(
            Join('', [
                "#!/bin/bash -x\n", "export NODE_IP=`hostname -I`\n",
                "export SEED_LIST=\"10.0.1.132\"\n",
                "export CASSANDRA_YML=\"/etc/cassandra/conf/cassandra.yaml\"\n",
                "export CLUSTER_NAME=\"devops_cluster\"\n",
                "export SNITCH_TYPE=\"Ec2Snitch\"\n",
                "sed -i \"/cluster_name:/c\\cluster_name: \\'${CLUSTER_NAME}\\'\"  ${CASSANDRA_YML}\n",
                "sed -i \"/- seeds:/c\\          - seeds: \\\"${SEED_LIST}\\\"\"     ${CASSANDRA_YML}\n",
                "sed -i \"/listen_address:/c\\listen_address: ${NODE_IP}\"       ${CASSANDRA_YML}\n",
                "sed -i \"/rpc_address:/c\\rpc_address: ${NODE_IP}\"             ${CASSANDRA_YML}\n",
                "sed -i \"/endpoint_snitch:/c\\endpoint_snitch: ${SNITCH_TYPE}\" ${CASSANDRA_YML}\n",
                "sed -i \"/authenticator: AllowAllAuthenticator/c\\authenticator: PasswordAuthenticator\" ${CASSANDRA_YML}\n",
                "echo 'auto_bootstrap: false' >> ${CASSANDRA_YML}\n",
                "service cassandra start\n"
            ]))

        # Seed node, attached to the fixed ENI above.
        self.CassandraSeed1 = self.template.add_resource(
            ec2.Instance(
                "CassandraSeed1",
                ImageId=Ref(self.CassandraImageId),
                KeyName=Ref(self.CassandraServerKeyName),
                InstanceType=Ref(self.CassandraServerInstanceType),
                IamInstanceProfile=Ref(self.CassandraServerIAMInstanceProfile),
                NetworkInterfaces=[
                    ec2.NetworkInterfaceProperty(
                        NetworkInterfaceId=Ref(
                            self.CassandraSeedNetworkInterface),
                        DeviceIndex="0",
                    ),
                ],
                UserData=cassandra_user_data,
                Tags=self.base_tags +
                Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                     "-CassandraSeed-1-Ec2"),
            ))

        # Internet-facing classic ELB exposing SSH to the non-seed nodes.
        self.CassandraPublicLoadBalancer = self.template.add_resource(
            elb.LoadBalancer(
                "CassandraPublicLoadBalancer",
                LoadBalancerName=self.
                environment_parameters["ClientEnvironmentKey"] +
                "-CassandraNonSeedPubLB",
                Scheme="internet-facing",
                Listeners=[
                    elb.Listener(
                        LoadBalancerPort="22",
                        InstancePort="22",
                        Protocol="TCP",
                        InstanceProtocol="TCP",
                    )
                ],
                Instances=[],
                SecurityGroups=[Ref(self.CassandraPublicLBSG)],
                Subnets=[Ref(self.RESTPubSubnet1)],
                Tags=self.base_tags +
                Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                     "-CassandraNonSeedPubLB"),
            ))

        # Launch configuration for non-seed nodes; reuses the same
        # bootstrap script as the seed so cluster settings cannot drift.
        self.CassandraNonSeedLaunchConfiguration = self.template.add_resource(
            LaunchConfiguration(
                "CassandraNonSeedLaunchConfiguration",
                ImageId=Ref(self.CassandraImageId),
                InstanceType=Ref(self.CassandraServerInstanceType),
                IamInstanceProfile=Ref(self.CassandraServerIAMInstanceProfile),
                KeyName=Ref(self.CassandraServerKeyName),
                SecurityGroups=[Ref(self.CassandraSG)],
                UserData=cassandra_user_data,
            ))

        # Fixed-size (1) ASG for the non-seed tier, registered with the LB.
        self.CassandraNonSeedAutoScalingGroup = self.template.add_resource(
            AutoScalingGroup(
                "CassandraNonSeedAutoscalingGroup",
                AutoScalingGroupName=self.
                environment_parameters["ClientEnvironmentKey"] +
                "-CassandraNonSeedAutoScalingGroup",
                LaunchConfigurationName=Ref(
                    self.CassandraNonSeedLaunchConfiguration),
                LoadBalancerNames=[Ref(self.CassandraPublicLoadBalancer)],
                MaxSize="1",
                MinSize="1",
                DesiredCapacity="1",
                VPCZoneIdentifier=[Ref(self.RESTPrivSubnet1)],
                Tags=[
                    AutoScalingTag(
                        "Name",
                        self.environment_parameters["ClientEnvironmentKey"] +
                        "-CassandraNonSeedEc2", True),
                    AutoScalingTag(
                        "Environment",
                        self.environment_parameters["EnvironmentName"], True),
                    AutoScalingTag(
                        "ResourceOwner",
                        self.environment_parameters["ResourceOwner"], True),
                    AutoScalingTag(
                        "ClientCode",
                        self.environment_parameters["ClientEnvironmentKey"],
                        True),
                ],
            ))
                remote_ip_prefix="0.0.0.0/0",
            ),
        ]
    )
)


# User data: upgrade all packages on first boot, logging start/end times.
upgrade_script = Join('\n', [
    "#!/bin/bash",
    "echo \"Upgrade started at $(date)\"",
    "apt-get update",
    "apt-get -y upgrade",
    "echo \"Upgrade complete at $(date)\"",
])

# Neutron port with a fixed private address on the tenant network.
instance_port = neutron.Port(
    "OpenStackPort",
    fixed_ips=[neutron.FixedIP(ip_address="192.168.1.20")],
    network_id="3e47c369-7007-472e-9e96-7dadb51e3e99",
    security_groups=[Ref(security_group)],
)

openstack_instance = template.add_resource(nova.Server(
    "OpenStackInstance",
    image="MyImage",
    flavor="t1.micro",
    key_name=Ref(keyname_param),
    networks=[instance_port],
    user_data=Base64(upgrade_script),
))

print(template.to_json())
            ),
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort=ApplicationPort,
                ToPort=ApplicationPort,
                CidrIp="0.0.0.0/0",
            ),
        ],
    ))

# Instance user data: base packages (ruby, java 8, node 10, git, ansible),
# one immediate ansible-pull, then a pull every 5 minutes via cron.
ud = Base64(
    Join('\n', [
        "#!/bin/bash", "sudo yum -y update",
        # '-y' added: without it yum prompts for confirmation, which in
        # non-interactive user data defaults to "no", so ruby was never
        # actually installed.
        "sudo yum -y install ruby",
        "sudo yum -y install java-1.8.0",
        "sudo yum -y remove java-1.7.0-openjdk",
        "curl --silent --location https://rpm.nodesource.com/setup_10.x | sudo bash -",
        "sudo yum -y install nodejs", "yum install --enablerepo=epel -y git",
        "pip install ansible", AnsiblePullCmd,
        "echo '*/5 * * * * root {}' > /etc/cron.d/ansible-pull".format(
            AnsiblePullCmd)
    ]))

# Trust policy: only the EC2 service may assume this role.
assume_role_document = Policy(
    Statement=[
        Statement(
            Effect=Allow,
            Action=[AssumeRole],
            Principal=Principal("Service", ["ec2.amazonaws.com"]),
        )
    ])

t.add_resource(Role("Role", AssumeRolePolicyDocument=assume_role_document))

t.add_resource(
Example #16
0
def buildInstance(t, args):
    """Add the OpenEMR webserver instance and its supporting resources.

    Adds HTTP/HTTPS and sysadmin security groups, an IAM policy/role/
    profile granting S3 backup and KMS access, an encrypted EBS volume for
    Docker data, and the webserver EC2 instance configured via cfn-init.

    Args:
        t: troposphere Template the resources are added to.
        args: parsed CLI namespace; ``args.dev`` opens SSH to the world and
            makes the Docker volume deletable, ``args.recovery`` grants
            read access to the recovery bucket and switches the compose
            file to manual-setup mode.

    Returns:
        The template ``t``.
    """
    # --- security groups -------------------------------------------------
    t.add_resource(
        ec2.SecurityGroup('WebserverIngressSG',
                          GroupDescription='Global Webserver Access',
                          VpcId=Ref('VPC'),
                          Tags=Tags(Name='Global Webserver Access')))

    t.add_resource(
        ec2.SecurityGroupIngress('WebserverIngressSG80',
                                 GroupId=Ref('WebserverIngressSG'),
                                 IpProtocol='tcp',
                                 CidrIp='0.0.0.0/0',
                                 FromPort='80',
                                 ToPort='80'))

    t.add_resource(
        ec2.SecurityGroupIngress('WebserverIngress443',
                                 GroupId=Ref('WebserverIngressSG'),
                                 IpProtocol='tcp',
                                 CidrIp='0.0.0.0/0',
                                 FromPort='443',
                                 ToPort='443'))

    t.add_resource(
        ec2.SecurityGroup('SysAdminAccessSG',
                          GroupDescription='System Administrator Access',
                          VpcId=Ref('VPC'),
                          Tags=Tags(Name='System Administrator Access')))

    if (args.dev):
        # Dev stacks only: SSH open to the world.
        t.add_resource(
            ec2.SecurityGroupIngress('DevSysadminIngress22',
                                     GroupId=Ref('SysAdminAccessSG'),
                                     IpProtocol='tcp',
                                     CidrIp='0.0.0.0/0',
                                     FromPort='22',
                                     ToPort='22'))

    # --- IAM: S3 backup read/write + KMS key usage -----------------------
    rolePolicyStatements = [{
        "Sid": "Stmt1500699052003",
        "Effect": "Allow",
        "Action": ["s3:ListBucket"],
        "Resource": [Join("", ["arn:aws:s3:::", Ref('S3Bucket')])]
    }, {
        "Sid": "Stmt1500699052000",
        "Effect": "Allow",
        "Action": ["s3:PutObject", "s3:GetObject", "s3:DeleteObject"],
        "Resource":
        [Join("", ["arn:aws:s3:::", Ref('S3Bucket'), '/Backup/*'])]
    }, {
        "Sid": "Stmt1500612724002",
        "Effect": "Allow",
        "Action": ["kms:Encrypt", "kms:Decrypt", "kms:GenerateDataKey*"],
        "Resource": [OpenEMRKeyARN]
    }]

    if (args.recovery):
        # Recovery stacks also need read-only access to the old stack's
        # backup bucket.
        rolePolicyStatements.extend([
            {
                "Sid": "Stmt1500699052004",
                "Effect": "Allow",
                "Action": ["s3:ListBucket"],
                "Resource":
                [Join("", ["arn:aws:s3:::", Ref('RecoveryS3Bucket')])]
            },
            {
                "Sid": "Stmt1500699052005",
                "Effect": "Allow",
                "Action": [
                    "s3:GetObject",
                ],
                "Resource": [
                    Join("", [
                        "arn:aws:s3:::",
                        Ref('RecoveryS3Bucket'), '/Backup/*'
                    ])
                ]
            },
        ])

    t.add_resource(
        iam.ManagedPolicy('WebserverPolicy',
                          Description='Policy for webserver instance',
                          PolicyDocument={
                              "Version": "2012-10-17",
                              "Statement": rolePolicyStatements
                          }))

    t.add_resource(
        iam.Role('WebserverRole',
                 AssumeRolePolicyDocument={
                     "Version": "2012-10-17",
                     "Statement": [{
                         "Effect": "Allow",
                         "Principal": {
                             "Service": ["ec2.amazonaws.com"]
                         },
                         "Action": ["sts:AssumeRole"]
                     }]
                 },
                 Path='/',
                 ManagedPolicyArns=[Ref('WebserverPolicy')]))

    t.add_resource(
        iam.InstanceProfile('WebserverInstanceProfile',
                            Path='/',
                            Roles=[Ref('WebserverRole')]))

    # --- encrypted Docker data volume ------------------------------------
    # Dev volumes are deleted with the stack; others are snapshotted.
    t.add_resource(
        ec2.Volume('DockerVolume',
                   DeletionPolicy='Delete' if args.dev else 'Snapshot',
                   Size=Ref('PracticeStorage'),
                   AvailabilityZone=Select("0", GetAZs("")),
                   VolumeType='gp2',
                   Encrypted=True,
                   KmsKeyId=OpenEMRKeyID,
                   Tags=Tags(Name="OpenEMR Practice")))

    # --- cfn-init payloads -----------------------------------------------
    # User data: run cfn-init (configset Setup), then report its exit code
    # to the instance creation policy via cfn-signal.
    bootstrapScript = [
        "#!/bin/bash -x\n", "exec > /var/log/openemr-cfn-bootstrap 2>&1\n",
        "cfn-init -v ", "         --stack ", ref_stack_name,
        "         --resource WebserverInstance ",
        "         --configsets Setup ", "         --region ", ref_region, "\n",
        "cfn-signal -e $? ", "         --stack ", ref_stack_name,
        "         --resource WebserverInstance ", "         --region ",
        ref_region, "\n"
    ]

    setupScript = [
        "#!/bin/bash -xe\n", "exec > /tmp/cloud-setup.log 2>&1\n",
        "/root/openemr-devops/packages/standard/ami/ami-configure.sh\n"
    ]

    # Stack values handed through to the on-instance configure script.
    stackPassthroughFile = [
        "S3=", Ref('S3Bucket'), "\n", "KMS=", OpenEMRKeyID, "\n"
    ]

    if (args.recovery):
        stackPassthroughFile.extend([
            "RECOVERYS3=",
            Ref('RecoveryS3Bucket'),
            "\n",
            "RECOVERY_NEWRDS=",
            GetAtt('RDSInstance', 'Endpoint.Address'),
            "\n",
        ])

    if (args.recovery):
        # Recovery mode: no DB credentials baked in; setup is manual.
        dockerComposeFile = [
            "version: '3.1'\n", "services:\n", "  openemr:\n",
            "    restart: always\n", "    image: openemr/openemr",
            docker_version, "\n", "    ports:\n", "    - 80:80\n",
            "    - 443:443\n", "    volumes:\n",
            "    - logvolume01:/var/log\n",
            "    - sitevolume:/var/www/localhost/htdocs/openemr/sites\n",
            "    environment:\n", "      MANUAL_SETUP: 1\n", "volumes:\n",
            "  logvolume01: {}\n", "  sitevolume: {}\n"
        ]
    else:
        # Fresh-install mode: DB and admin credentials are interpolated
        # from stack parameters. (Restored Ref('RDSPassword') /
        # Ref('AdminPassword') interpolation that had been clobbered by
        # secret-masking in the scraped source.)
        dockerComposeFile = [
            "version: '3.1'\n", "services:\n", "  openemr:\n",
            "    restart: always\n", "    image: openemr/openemr",
            docker_version, "\n", "    ports:\n", "    - 80:80\n",
            "    - 443:443\n", "    volumes:\n",
            "    - logvolume01:/var/log\n",
            "    - sitevolume:/var/www/localhost/htdocs/openemr/sites\n",
            "    environment:\n", "      MYSQL_HOST: '",
            GetAtt('RDSInstance', 'Endpoint.Address'), "'\n",
            "      MYSQL_ROOT_USER: openemr\n",
            "      MYSQL_ROOT_PASS: '", Ref('RDSPassword'), "'\n",
            "      MYSQL_USER: openemr\n",
            "      MYSQL_PASS: '", Ref('RDSPassword'), "'\n",
            "      OE_USER: admin\n",
            "      OE_PASS: '", Ref('AdminPassword'), "'\n",
            "volumes:\n", "  logvolume01: {}\n", "  sitevolume: {}\n"
        ]

    # cfn-init config: drop the setup script, passthrough variables, and
    # compose file on disk, then run the setup script.
    bootstrapInstall = cloudformation.InitConfig(
        files={
            "/root/cloud-setup.sh": {
                "content": Join("", setupScript),
                "mode": "000500",
                "owner": "root",
                "group": "root"
            },
            "/root/cloud-variables": {
                "content": Join("", stackPassthroughFile),
                "mode": "000500",
                "owner": "root",
                "group": "root"
            },
            "/root/openemr-devops/packages/standard/docker-compose.yaml": {
                "content": Join("", dockerComposeFile),
                "mode": "000500",
                "owner": "root",
                "group": "root"
            }
        },
        commands={"01_setup": {
            "command": "/root/cloud-setup.sh"
        }})

    bootstrapMetadata = cloudformation.Metadata(
        cloudformation.Init(cloudformation.InitConfigSets(Setup=['Install']),
                            Install=bootstrapInstall))

    # --- the webserver instance ------------------------------------------
    # CreationPolicy: stack creation waits up to 15 minutes for the
    # cfn-signal issued at the end of bootstrapScript.
    t.add_resource(
        ec2.Instance('WebserverInstance',
                     Metadata=bootstrapMetadata,
                     ImageId=FindInMap('RegionData', ref_region,
                                       'OpenEMRMktPlaceAMI'),
                     InstanceType=Ref('WebserverInstanceSize'),
                     NetworkInterfaces=[
                         ec2.NetworkInterfaceProperty(
                             AssociatePublicIpAddress=True,
                             DeviceIndex="0",
                             GroupSet=[
                                 Ref('ApplicationSecurityGroup'),
                                 Ref('WebserverIngressSG'),
                                 Ref('SysAdminAccessSG')
                             ],
                             SubnetId=Ref('PublicSubnet1'))
                     ],
                     KeyName=Ref('EC2KeyPair'),
                     IamInstanceProfile=Ref('WebserverInstanceProfile'),
                     Volumes=[{
                         "Device": "/dev/sdd",
                         "VolumeId": Ref('DockerVolume')
                     }],
                     Tags=Tags(Name='OpenEMR Cloud Standard'),
                     InstanceInitiatedShutdownBehavior='stop',
                     UserData=Base64(Join('', bootstrapScript)),
                     CreationPolicy={"ResourceSignal": {
                         "Timeout": "PT15M"
                     }}))

    return t
Example #17
0
        "export ambari_pass="******"\n",
        "export deploy=",
        ref_deploy_cluster,
        "\n",
    ]
    return exports + bootstrap_script_body.splitlines(True)


# Master node running Ambari; bootstrapped with Ambari server+agent
# enabled and pointed at itself (127.0.0.1).
ambari_user_data = Base64(
    Join("",
         my_bootstrap_script('AmbariNode', 'true', 'true', '127.0.0.1')))

AmbariNode = t.add_resource(
    ec2.Instance(
        "AmbariNode",
        ImageId=FindInMap("CENTOS7", Ref("AWS::Region"), "AMI"),
        InstanceType=Ref(InstanceType),
        KeyName=Ref(KeyName),
        SubnetId=Ref(SubnetId),
        SecurityGroupIds=Ref(SecurityGroups),
        BlockDeviceMappings=my_block_device_mappings_ephemeral(24, "/dev/sd"),
        UserData=ambari_user_data,
        # Stack creation waits up to 15 minutes for one cfn-signal.
        CreationPolicy=CreationPolicy(
            ResourceSignal=ResourceSignal(Count=1, Timeout="PT15M")),
        Tags=Tags(Name=ref_stack_name),
    ))

t.add_output([
    Output(
Example #18
0
 def test_parameterized_codec_b64(self) -> None:
     """b64 mode wraps the interpolated template in a Base64 function."""
     result = parameterized_codec("Test {{Interpolation}} Here", True)
     reference = Base64(
         Join("", ["Test ", {"Ref": "Interpolation"}, " Here"]))
     self.assertEqual(Base64, result.__class__)
     self.assertTemplateEqual(reference, result)
# Auto-scaled web tier (1-5 instances) behind the classic ELB.
WebServerGroup = t.add_resource(AutoScalingGroup(
    "WebServerGroup",
    AvailabilityZones=GetAZs(""),
    DesiredCapacity=Ref(WebServerCapacity),
    LaunchConfigurationName=Ref("LaunchConfig"),
    LoadBalancerNames=[Ref("ElasticLoadBalancer")],
    MinSize="1",
    MaxSize="5",
))

init_config = {"config": InitConfig({})}

# User data: cfn-init applies the instance metadata, then cfn-signal
# reports the result against the WebServerGroup resource.
launch_user_data = [
    "#!/bin/bash -xe\n",
    "yum update -y aws-cfn-bootstrap\n",
    "# Install the files and packages from the metadata\n",
    "/opt/aws/bin/cfn-init -v ",
    "         --stack ", Ref("AWS::StackName"),
    "         --resource LaunchConfig ",
    "         --region ", Ref("AWS::Region"), "\n",
    "# Signal the status from cfn-init\n",
    "/opt/aws/bin/cfn-signal -e $? ",
    "         --stack ", Ref("AWS::StackName"),
    "         --resource WebServerGroup ",
    "         --region ", Ref("AWS::Region"), "\n",
]

LaunchConfig = t.add_resource(LaunchConfiguration(
    "LaunchConfig",
    Metadata=Init(init_config),
    UserData=Base64(Join("", launch_user_data)),
    KeyName=Ref(KeyName),
    SecurityGroups=[Ref(WebServerSecurityGroup)],
    InstanceType=Ref(InstanceType),
    # AMI chosen by region and by the architecture of the instance type.
    ImageId=FindInMap("AWSRegionArch2AMI", Ref("AWS::Region"),
                      FindInMap("AWSInstanceType2Arch", Ref(InstanceType),
                                "Arch")),
))

# MySQL (3306) reachable only from the web tier's security group.
DBEC2SecurityGroup = t.add_resource(SecurityGroup(
    "DBEC2SecurityGroup",
    GroupDescription="Open database for access",
    SecurityGroupIngress=[{
        "IpProtocol": "tcp",
        "FromPort": "3306",
        "ToPort": "3306",
        "SourceSecurityGroupName": Ref(WebServerSecurityGroup),
    }],
    Condition="Is-EC2-VPC",
))

ElasticLoadBalancer = t.add_resource(LoadBalancer(
    "ElasticLoadBalancer",
Example #20
0
    def add_resources(self):

        metadata = {
            "AWS::CloudFormation::Init": {
                "configSets": {
                    "wordpress_install": [
                        "install_wordpress"]
                },
                "install_wordpress": {
                    "packages": {
                        "apt": {
                            "apache2": [],
                            "php": [],
                            "php-mysql": [],
                            "php7.0": [],
                            "php7.0-mysql": [],
                            "libapache2-mod-php7.0": [],
                            "php7.0-cli": [],
                            "php7.0-cgi": [],
                            "php7.0-gd": [],
                            "mysql-client": [],
                            "sendmail": []
                        }
                    },
                    "sources": {
                        "/var/www/html": "http://wordpress.org/latest.tar.gz"
                    },
                    "files": {
                        "/tmp/create-wp-config": {
                            "content": {
                                "Fn::Join": ["", [
                                    "#!/bin/bash\n",
                                    "cp /var/www/html/wordpress/wp-config-sample.php /var/www/html/wordpress/wp-config.php\n",
                                    "sed -i \"s/'database_name_here'/'", Ref(
                                        self.DBName), "'/g\" wp-config.php\n",
                                    "sed -i \"s/'username_here'/'", Ref(
                                        self.DBUser), "'/g\" wp-config.php\n",
                                    "sed -i \"s/'password_here'/'", Ref(
                                        self.DBPass), "'/g\" wp-config.php\n",
                                    "sed -i \"s/'localhost'/'", Ref(
                                        self.RDSEndpoint), "'/g\" wp-config.php\n"
                                ]]
                            },
                            "mode": "000500",
                            "owner": "root",
                            "group": "root"
                        }
                    },
                    "commands": {
                        "01_configure_wordpress": {
                            "command": "/tmp/create-wp-config",
                            "cwd": "/var/www/html/wordpress"
                        }
                    }
                }
            }
        }

        # Wait-condition pair: the instance signals this handle from user data
        # (via cfn-signal below) so stack creation blocks until setup finishes.
        self.WaitHandle = self.template.add_resource(cloudformation.WaitConditionHandle(
            "WaitHandle",
        ))

        # Fails the stack if no signal arrives within 600s of the ASG being created.
        self.WaitCondition = self.template.add_resource(cloudformation.WaitCondition(
            "WaitCondition",
            Handle=Ref(self.WaitHandle),
            Timeout="600",
            DependsOn="WebServerAutoScalingGroup",
        ))

        # Launch configuration for the web tier: mounts EFS at /var/www/html,
        # runs cfn-init (wordpress_install configset from `metadata`), installs
        # WordPress via wp-cli, then signals the wait handle.
        self.WebServerLaunchConfiguration = self.template.add_resource(autoscaling.LaunchConfiguration(
            "WebServerLaunchConfiguration",
            Metadata=metadata,
            UserData=Base64(Join("", [
                "#!/bin/bash -x\n",
                "apt-get update\n",
                "apt-get install python-pip nfs-common -y \n",
                "mkdir -p /var/www/html/\n",
                # Instance-metadata lookup of the AZ, used to build the EFS mount DNS name.
                "EC2_AZ=$(curl -s http://169.254.169.254/latest/meta-data/placement/availability-zone)\n",
                "echo \"$EC2_AZ.", Ref(self.FileSystemID), ".efs.", Ref(
                    "AWS::Region"), ".amazonaws.com:/ /var/www/html/ nfs4 nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2 0 0\" >> /etc/fstab\n"
                # NOTE(review): no comma after the fstab string above, so Python
                # concatenates it with "mount -a\n" into one Join element; the
                # rendered output is unchanged because the Join separator is "".
                "mount -a\n",
                "pip install https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz\n",

                # "exec > /tmp/userdata.log 2>&1\n",
                "/usr/local/bin/cfn-init -v  --stack ", Ref("AWS::StackName"),
                "         --resource WebServerLaunchConfiguration ",
                "         --configsets wordpress_install ",
                "         --region ", Ref("AWS::Region"),
                "\n",
                # Move the extracted wordpress tree up one level and hand it to apache.
                "/bin/mv /var/www/html/wordpress/* /var/www/html/\n",
                "/bin/rm -f /var/www/html/index.html\n",
                "/bin/rm -rf /var/www/html/wordpress/\n",
                "chown www-data:www-data /var/www/html/* -R\n",
                "/usr/sbin/service apache2 restart\n",
                "/usr/bin/curl -O https://raw.githubusercontent.com/wp-cli/builds/gh-pages/phar/wp-cli.phar\n",
                "/bin/chmod +x wp-cli.phar\n",
                "/bin/mv wp-cli.phar /usr/local/bin/wp\n",
                "cd /var/www/html/\n",
                # Only install WordPress on first boot (shared EFS means a later
                # instance may find it already installed).
                "if ! $(sudo -u www-data /usr/local/bin/wp core is-installed); then\n",
                "sudo -u www-data /usr/local/bin/wp core install ",
                "--url='", Ref(self.Hostname), ".", Ref(self.Domain), "' ",
                "--title='Cloudreach Meetup - ", Ref(
                    self.Environment), "' ",
                "--admin_user='******' ",
                "--admin_password='******' ",
                "--admin_email='*****@*****.**'\n",
                "wget  https://s3-eu-west-1.amazonaws.com/sceptre-meetup-munich/header.jpg -O /var/www/html/wp-content/themes/twentyseventeen/assets/images/header.jpg\n",
                "chown www-data:www-data /var/www/html/wp-content/themes/twentyseventeen/assets/images/header.jpg\n",

                "fi\n",

                # Report the user-data exit status to the wait condition.
                "/usr/local/bin/cfn-signal -e $? --stack ", Ref(
                    "AWS::StackName"), "   -r \"Webserver setup complete\" '", Ref(self.WaitHandle), "'\n"

            ]
            )),
            ImageId=FindInMap("AWSRegion2AMI", Ref("AWS::Region"), "AMI"),
            KeyName=Ref(self.KeyName),
            SecurityGroups=[Ref(self.WebSecurityGroup)],
            InstanceType=Ref(self.InstanceType),
            AssociatePublicIpAddress=True,
        ))

        # Fixed-size group (min = desired = max = WebServerCapacity) spread
        # across both subnets/AZs and registered with the classic ELB.
        self.WebServerAutoScalingGroup = self.template.add_resource(autoscaling.AutoScalingGroup(
            "WebServerAutoScalingGroup",
            MinSize=Ref(self.WebServerCapacity),
            DesiredCapacity=Ref(self.WebServerCapacity),
            MaxSize=Ref(self.WebServerCapacity),
            VPCZoneIdentifier=[Ref(self.Subnet1), Ref(self.Subnet2)],
            AvailabilityZones=[Ref(self.AvailabilityZone1),
                               Ref(self.AvailabilityZone2)],
            Tags=autoscaling.Tags(
                Name=Join("-", [Ref(self.Project), "web", "asg"]),
                Environment=Ref(self.Environment),
                Project=Ref(self.Project),
            ),
            LoadBalancerNames=[Ref(self.ElasticLoadBalancer)],
            LaunchConfigurationName=Ref(self.WebServerLaunchConfiguration),
        ))

        # Step scaling: +1 instance on scale-up, -1 on scale-down, 60s cooldown.
        self.WebServerScaleUpPolicy = self.template.add_resource(autoscaling.ScalingPolicy(
            "WebServerScaleUpPolicy",
            ScalingAdjustment="1",
            Cooldown="60",
            AutoScalingGroupName=Ref(self.WebServerAutoScalingGroup),
            AdjustmentType="ChangeInCapacity",
        ))

        self.WebServerScaleDownPolicy = self.template.add_resource(autoscaling.ScalingPolicy(
            "WebServerScaleDownPolicy",
            ScalingAdjustment="-1",
            Cooldown="60",
            AutoScalingGroupName=Ref(self.WebServerAutoScalingGroup),
            AdjustmentType="ChangeInCapacity",
        ))

        # NOTE(review): alarm description says "CPU < 70% for 1 minute" but the
        # alarm evaluates 2 periods of 60s — effectively 2 minutes; and the
        # low/high thresholds (70 down / 50 up) overlap, which can cause
        # scale-up and scale-down to both fire in the 50-70% band. Confirm
        # thresholds with the stack owner before changing.
        self.CPUAlarmLow = self.template.add_resource(cloudwatch.Alarm(
            "CPUAlarmLow",
            EvaluationPeriods="2",
            Dimensions=[
                cloudwatch.MetricDimension(
                    Name="AutoScalingGroupName",
                    Value=Ref(self.WebServerAutoScalingGroup)
                ),
            ],
            AlarmActions=[Ref(self.WebServerScaleDownPolicy)],
            AlarmDescription="Scale-down if CPU < 70% for 1 minute",
            Namespace="AWS/EC2",
            Period="60",
            ComparisonOperator="LessThanThreshold",
            Statistic="Average",
            Threshold="70",
            MetricName="CPUUtilization",
        ))

        self.CPUAlarmHigh = self.template.add_resource(cloudwatch.Alarm(
            "CPUAlarmHigh",
            EvaluationPeriods="2",
            Dimensions=[
                cloudwatch.MetricDimension(
                    Name="AutoScalingGroupName",
                    # String ref here vs object ref in CPUAlarmLow — both
                    # render as Ref("WebServerAutoScalingGroup").
                    Value=Ref("WebServerAutoScalingGroup")
                ),
            ],
            AlarmActions=[Ref(self.WebServerScaleUpPolicy)],
            AlarmDescription="Scale-up if CPU > 50% for 1 minute",
            Namespace="AWS/EC2",
            Period="60",
            ComparisonOperator="GreaterThanThreshold",
            Statistic="Average",
            Threshold="50",
            MetricName="CPUUtilization",
        ))
Example #21
0
def main():
    """Build a CloudFormation template for a single-instance ECS deployment.

    Creates an ECS cluster, the task/scheduler/instance IAM roles, one EC2
    container instance bootstrapped from ``user-data.sh``, a task definition
    running the SimpleServer container, and an ALB (target group, listener)
    fronting the ECS service.

    Returns:
        str: the template rendered as JSON.
    """
    template = Template()

    # The cluster the container instance and service register with.
    cluster = ecs.Cluster("ECSCluster", ClusterName="WorldCheckCluster")
    template.add_resource(cluster)

    # Role assumed by ECS tasks themselves (no inline policies).
    task_role = iam.Role(
        "ECSTaskRole",
        AssumeRolePolicyDocument={
            "Version": "2012-10-17",
            "Statement": [{
                "Effect": "Allow",
                "Principal": {"Service": ["ecs-tasks.amazonaws.com"]},
                "Action": ["sts:AssumeRole"],
            }],
        },
    )
    template.add_resource(task_role)

    # Role the ECS service scheduler uses to (de)register instances/targets
    # with the load balancer.
    scheduler_role = iam.Role(
        "ECSServiceSchedulerRole",
        AssumeRolePolicyDocument={
            "Version": "2012-10-17",
            "Statement": [{
                "Effect": "Allow",
                "Principal": {"Service": ["ecs.amazonaws.com"]},
                "Action": ["sts:AssumeRole"],
            }],
        },
        Policies=[
            iam.Policy(
                PolicyDocument={
                    "Version": "2012-10-17",
                    "Statement": [{
                        "Effect": "Allow",
                        "Action": [
                            "ec2:Describe*",
                            "elasticloadbalancing:DeregisterInstancesFromLoadBalancer",
                            "elasticloadbalancing:DeregisterTargets",
                            "elasticloadbalancing:Describe*",
                            "elasticloadbalancing:RegisterInstancesWithLoadBalancer",
                            "elasticloadbalancing:RegisterTargets"
                        ],
                        "Resource": "*",
                    }],
                },
                PolicyName="ecs-service",
            ),
        ],
    )
    template.add_resource(scheduler_role)

    # Role for the EC2 container instance: ECS agent registration, ECR pulls,
    # CloudWatch logs/metrics.
    instance_role = iam.Role(
        "EC2InstanceRole",
        AssumeRolePolicyDocument={
            "Version": "2012-10-17",
            "Statement": [{
                "Effect": "Allow",
                "Principal": {"Service": ["ec2.amazonaws.com"]},
                "Action": ["sts:AssumeRole"],
            }],
        },
        Policies=[
            iam.Policy(
                PolicyDocument={
                    "Version": "2012-10-17",
                    "Statement": [{
                        "Effect": "Allow",
                        "Action": [
                            "ecs:CreateCluster",
                            "ecs:DeregisterContainerInstance",
                            "ecs:DiscoverPollEndpoint", "ecs:Poll",
                            "ecs:RegisterContainerInstance",
                            "ecs:StartTelemetrySession",
                            "ecr:GetAuthorizationToken",
                            "ecr:BatchGetImage",
                            "ecr:GetDownloadUrlForLayer", "ecs:Submit*",
                            "logs:CreateLogStream", "logs:PutLogEvents",
                            "ec2:DescribeTags", "cloudwatch:PutMetricData"
                        ],
                        "Resource": "*",
                    }],
                },
                PolicyName="ecs-service",
            ),
        ],
    )
    template.add_resource(instance_role)

    instance_profile = iam.InstanceProfile(
        "EC2InstanceProfile",
        Roles=[Ref("EC2InstanceRole")],
    )
    template.add_resource(instance_profile)

    # User data is kept in a separate shell script; each line is passed
    # through Fn::Sub so it may reference template parameters/resources.
    with open("user-data.sh", "r") as f:
        user_data_content = f.readlines()

    container_instance = ec2.Instance(
        "EC2Instance",
        ImageId="ami-13f7226a",
        InstanceType="t2.micro",
        SecurityGroups=["default"],
        UserData=Base64(Join('', [Sub(x) for x in user_data_content])),
        IamInstanceProfile=Ref("EC2InstanceProfile"),
    )
    template.add_resource(container_instance)

    task_definition = ecs.TaskDefinition(
        "ECSTaskDefinition",
        TaskRoleArn=Ref("ECSTaskRole"),
        ContainerDefinitions=[
            ecs.ContainerDefinition(
                Name="SimpleServer",
                Memory="128",
                Image="abbas123456/simple-server:latest",
                PortMappings=[ecs.PortMapping(ContainerPort=8000)],
            ),
        ],
    )
    template.add_resource(task_definition)

    target_group = elb.TargetGroup(
        "ECSTargetGroup",
        VpcId="vpc-925497f6",
        Port=8000,
        Protocol="HTTP",
    )
    template.add_resource(target_group)

    load_balancer = elb.LoadBalancer(
        "LoadBalancer",
        Subnets=["subnet-a321c8fb", "subnet-68fa271e", "subnet-689d350c"],
        SecurityGroups=["sg-0202bd65"],
    )
    template.add_resource(load_balancer)

    # HTTP :80 listener forwarding everything to the target group.
    listener = elb.Listener(
        "LoadBalancerListener",
        DefaultActions=[
            elb.Action(Type="forward",
                       TargetGroupArn=Ref("ECSTargetGroup"))
        ],
        LoadBalancerArn=Ref("LoadBalancer"),
        Port=80,
        Protocol="HTTP",
    )
    template.add_resource(listener)

    # The service must not start before the listener exists, hence DependsOn.
    service = ecs.Service(
        "ECSService",
        Cluster=Ref("ECSCluster"),
        DesiredCount=1,
        LoadBalancers=[
            ecs.LoadBalancer(ContainerPort=8000,
                             ContainerName="SimpleServer",
                             TargetGroupArn=Ref("ECSTargetGroup"))
        ],
        Role=Ref("ECSServiceSchedulerRole"),
        TaskDefinition=Ref("ECSTaskDefinition"),
        DependsOn="LoadBalancerListener",
    )
    template.add_resource(service)

    return template.to_json()
Example #22
0
    SecurityGroups=[Ref("LoadBalancerSecurityGroup")],
))

# Expose the application's HTTP endpoint (ELB DNS name + app port) as a
# stack output.
t.add_output(Output(
    "WebUrl",
    Description="Application endpoint",
    Value=Join("", [
        "http://", GetAtt("LoadBalancer", "DNSName"),
        ":", ApplicationPort
    ]),
))

# Instance user data: log everything to /var/log/userdata.log, install git
# and ansible, run ansible-pull once at boot, then re-run it every 10
# minutes via cron so the node converges on the latest playbook.
ud = Base64(Join("\n", [
    "#!/bin/bash",
    "exec > /var/log/userdata.log 2>&1",
    "sudo yum install --enablerepo=epel -y git",
    "sudo pip install ansible",
    AnsiblePullCmd,
    "echo '*/10 * * * * {}' > /etc/cron.d/ansible-pull".format(AnsiblePullCmd)
]))

t.add_resource(Role(
    "Role",
    AssumeRolePolicyDocument=Policy(
        Statement=[
            Statement(
                Effect=Allow,
                Action=[AssumeRole],
                Principal=Principal("Service", ["ec2.amazonaws.com"])
            )
        ]
    )
Example #23
0
def main():
    """Build and print (as YAML on stdout) a CloudFormation template for a
    load-balanced, autoscaled Flask web tier provisioned by ansible-pull,
    plus CodeBuild/CodeDeploy resources for continuous deployment.

    NOTE(review): this function prints the template rather than returning
    it, and another ``main`` defined later in this module shadows this one
    at import time.
    """
    t = Template("A template to create a load balanced autoscaled Web flask deployment using ansible.")

    addMapping(t)

    ### VPC CONFIGURATION ###
    # NOTE(review): most resources below pass the template ``t`` as the
    # second positional argument (which presumably registers them with the
    # template), while the VPC uses an explicit add_resource — verify both
    # styles attach the resource.
    vpc = ec2.VPC(
        "MainVPC",
        CidrBlock="10.1.0.0/16"
    )

    t.add_resource(vpc)

    vpc_id = Ref(vpc)

    # Two public subnets in different AZs (hard-coded us-east-1a/b).
    subnet_1 = ec2.Subnet(
        "WebAppSubnet1",
        t,
        AvailabilityZone="us-east-1a",
        CidrBlock="10.1.0.0/24",
        MapPublicIpOnLaunch=True,
        VpcId=vpc_id,
    )
    subnet_1_id = Ref(subnet_1)

    subnet_2 = ec2.Subnet(
        "WebAppSubnet2",
        t,
        AvailabilityZone="us-east-1b",
        CidrBlock="10.1.1.0/24",
        MapPublicIpOnLaunch=True,
        VpcId=vpc_id,
    )
    subnet_2_id = Ref(subnet_2)

    ### NETWORKING ###
    # Internet gateway + default route so both subnets are public.
    igw = ec2.InternetGateway("internetGateway", t)

    gateway_to_internet = ec2.VPCGatewayAttachment(
        "GatewayToInternet",
        t,
        VpcId=vpc_id,
        InternetGatewayId=Ref(igw)
    )

    route_table = ec2.RouteTable(
        "subnetRouteTable",
        t,
        VpcId=vpc_id
    )

    route_table_id = Ref(route_table)
    internet_route = ec2.Route(
        "routeToInternet",
        t,
        DependsOn=gateway_to_internet,
        DestinationCidrBlock="0.0.0.0/0",
        GatewayId=Ref(igw),
        RouteTableId=route_table_id
    )
    subnet_1_route_assoc = ec2.SubnetRouteTableAssociation(
        "Subnet1RouteAssociation",
        t,
        RouteTableId=route_table_id,
        SubnetId=Ref(subnet_1)
    )
    subnet_2_route_assoc = ec2.SubnetRouteTableAssociation(
        "Subnet2RouteAssociation",
        t,
        RouteTableId=route_table_id,
        SubnetId=Ref(subnet_2)
    )

    # World-open ingress rules (HTTP for the ELB, SSH for instances).
    http_ingress = {
        "CidrIp": "0.0.0.0/0",
        "Description": "Allow HTTP traffic in from internet.",
        "IpProtocol": "tcp",
        "FromPort": 80,
        "ToPort": 80,
    }
    ssh_ingress = {
        "CidrIp": "0.0.0.0/0",
        "Description": "Allow SSH traffic in from internet.",
        "IpProtocol": "tcp",
        "FromPort": 22,
        "ToPort": 22,
    }

    elb_sg = ec2.SecurityGroup(
        "elbSecurityGroup",
        t,
        GroupName="WebGroup",
        GroupDescription="Allow web traffic in from internet to ELB",
        VpcId=vpc_id,
        SecurityGroupIngress=[
            http_ingress
        ])
    ssh_sg = ec2.SecurityGroup(
        "sshSecurityGroup",
        t,
        GroupName="SSHGroup",
        GroupDescription="Allow SSH traffic in from internet",
        VpcId=vpc_id,
        SecurityGroupIngress=[
            ssh_ingress
        ]
    )
    elb_sg_id = Ref(elb_sg)
    ssh_sg_id = Ref(ssh_sg)

    # Instances only accept port 80 from the ELB's security group.
    autoscale_ingress = {
        "SourceSecurityGroupId": elb_sg_id,
        "Description": "Allow web traffic in from ELB",
        "IpProtocol": "tcp",
        "FromPort": 80,
        "ToPort": 80
    }
    autoscale_sg = ec2.SecurityGroup(
        "WebAutoscaleSG",
        t,
        GroupName="AutoscaleGroup",
        GroupDescription="Allow web traffic in from elb on port 80",
        VpcId=vpc_id,
        SecurityGroupIngress=[
            autoscale_ingress
        ]
    )
    autoscale_sg_id = Ref(autoscale_sg)

    # BUCKETS
    # Artifact bucket shared by CodeBuild (writes) and CodeDeploy (reads).
    app_bucket = s3.Bucket(
        "CodeDeployApplicationBucket",
        t,
    )

    ### LOAD BALANCING ###
    Web_elb = elb.LoadBalancer(
        "WebElb",
        t,
        Name="WebElb", # TODO: Fix for name conflict
        Subnets=[subnet_1_id, subnet_2_id],
        SecurityGroups=[elb_sg_id]
    )

    # NOTE(review): HealthCheckPort is given as an int here; the
    # CloudFormation property is a string — verify the troposphere version
    # in use accepts an int.
    Web_target_group = elb.TargetGroup(
        "WebTargetGroup",
        t,
        DependsOn=Web_elb,
        HealthCheckPath="/health",
        HealthCheckPort=80,
        HealthCheckProtocol="HTTP",
        Matcher=elb.Matcher(HttpCode="200"),
        Name="NginxTargetGroup",
        Port=80,
        Protocol="HTTP",
        VpcId=vpc_id
    )

    # HTTP :80 listener forwarding everything to the target group.
    Web_listener = elb.Listener(
        "WebListener",
        t,
        LoadBalancerArn=Ref(Web_elb),
        DefaultActions=[
            elb.Action("forwardAction",
                TargetGroupArn=Ref(Web_target_group),
                Type="forward"
            )
        ],
        Port=80,
        Protocol="HTTP"
    )

    ### AUTOSCALING ###
    # Everything after sudo -u ubuntu is one command
    # The sudo command is required to properly set file permissions when
    # running the ansible script as it assumes running from non root user
    lc_user_data = Base64(Join("\n",
    [
        "#!/bin/bash",
        "apt-add-repository -y ppa:ansible/ansible",
        "apt-get update && sudo apt-get -y upgrade",
        "apt-get -y install git",
        "apt-get -y install ansible",
        "cd /home/ubuntu/",
        "sudo -H -u ubuntu bash -c '"
        "export LC_ALL=C.UTF-8 && "
        "export LANG=C.UTF-8 && "
        "ansible-pull -U https://github.com/DameonSmith/aws-meetup-ansible.git --extra-vars \"user=ubuntu\"'"
    ]))

    # Instance role granting S3 access to the artifact bucket and the
    # regional aws-codedeploy-* buckets (for the CodeDeploy agent installer).
    web_instance_role = iam.Role(
        "webInstanceCodeDeployRole",
        t,
        AssumeRolePolicyDocument={
            'Statement': [{
                'Effect': 'Allow',
                'Principal': {
                    'Service': 'ec2.amazonaws.com'
                },
                'Action': 'sts:AssumeRole'
            }]
        },
        Policies=[
            iam.Policy(
                PolicyName="CodeDeployS3Policy",
                PolicyDocument=aws.Policy(
                    Version='2012-10-17',
                    Statement=[
                        aws.Statement(
                            Sid='CodeDeployS3',
                            Effect=aws.Allow,
                            Action=[
                                aws_s3.PutObject,
                                aws_s3.GetObject,
                                aws_s3.GetObjectVersion,
                                aws_s3.DeleteObject,
                                aws_s3.ListObjects,
                                aws_s3.ListBucket,
                                aws_s3.ListBucketVersions,
                                aws_s3.ListAllMyBuckets,
                                aws_s3.ListMultipartUploadParts,
                                aws_s3.ListBucketMultipartUploads,
                                aws_s3.ListBucketByTags,
                            ],
                            Resource=[
                                GetAtt(app_bucket, 'Arn'),
                                Join('', [
                                    GetAtt(app_bucket, 'Arn'),
                                    '/*',
                                ]),
                                "arn:aws:s3:::aws-codedeploy-us-east-2/*",
                                "arn:aws:s3:::aws-codedeploy-us-east-1/*",
                                "arn:aws:s3:::aws-codedeploy-us-west-1/*",
                                "arn:aws:s3:::aws-codedeploy-us-west-2/*",
                                "arn:aws:s3:::aws-codedeploy-ca-central-1/*",
                                "arn:aws:s3:::aws-codedeploy-eu-west-1/*",
                                "arn:aws:s3:::aws-codedeploy-eu-west-2/*",
                                "arn:aws:s3:::aws-codedeploy-eu-west-3/*",
                                "arn:aws:s3:::aws-codedeploy-eu-central-1/*",
                                "arn:aws:s3:::aws-codedeploy-ap-northeast-1/*",
                                "arn:aws:s3:::aws-codedeploy-ap-northeast-2/*",
                                "arn:aws:s3:::aws-codedeploy-ap-southeast-1/*",
                                "arn:aws:s3:::aws-codedeploy-ap-southeast-2/*",
                                "arn:aws:s3:::aws-codedeploy-ap-south-1/*",
                                "arn:aws:s3:::aws-codedeploy-sa-east-1/*",
                            ]
                        )
                    ]
                )
            )
        ]
    )

    web_instance_profile = iam.InstanceProfile(
        "webInstanceProfile",
        t,
        Path='/',
        Roles=[Ref(web_instance_role)],
    )

    Web_launch_config = autoscaling.LaunchConfiguration(
        "webLaunchConfig",
        t,
        ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"), # TODO: Remove magic string
        SecurityGroups=[ssh_sg_id, autoscale_sg_id],
        IamInstanceProfile=Ref(web_instance_profile),
        InstanceType="t2.micro",
        BlockDeviceMappings= [{
            "DeviceName": "/dev/sdk",
            "Ebs": {"VolumeSize": "10"}
        }],
        UserData= lc_user_data,
        KeyName="advanced-cfn",
    )

    # Fixed-size group of 2 across both subnets, registered with the ALB
    # target group.
    Web_autoscaler = autoscaling.AutoScalingGroup(
        "WebAutoScaler",
        t,
        LaunchConfigurationName=Ref(Web_launch_config),
        MinSize="2", # TODO: Change to parameter
        MaxSize="2",
        VPCZoneIdentifier=[subnet_2_id, subnet_1_id],
        TargetGroupARNs= [Ref(Web_target_group)]
    )

    t.add_output([
        Output(
            "ALBDNS",
            Description="The DNS name for the application load balancer.",
            Value=GetAtt(Web_elb, "DNSName")
        )
    ])


    # DEVTOOLS CONFIG
    # CodeBuild service role: CloudWatch Logs plus read/write on the
    # artifact bucket.
    codebuild_service_role = iam.Role(
        "CMSCodeBuildServiceRole",
        t,
        AssumeRolePolicyDocument={
            'Statement': [{
                'Effect': 'Allow',
                'Principal': {
                    'Service': ['codebuild.amazonaws.com']
                },
                'Action': ['sts:AssumeRole']
            }]
        },
        Policies=[
            iam.Policy(
                PolicyName="CloudWatchLogsPolicy",
                PolicyDocument=aws.Policy(
                    Version="2012-10-17",
                    Statement=[
                        aws.Statement(
                            Sid='logs',
                            Effect=aws.Allow,
                            Action=[
                                aws_logs.CreateLogGroup,
                                aws_logs.CreateLogStream,
                                aws_logs.PutLogEvents
                            ],
                            Resource=['*']
                        )
                    ]
                )
            ),
            iam.Policy(
                PolicyName="s3AccessPolicy",
                PolicyDocument=aws.Policy(
                    Version="2012-10-17",
                    Statement=[
                        aws.Statement(
                            Sid='codebuilder',
                            Effect=aws.Allow,
                            Action=[
                                aws_s3.PutObject,
                                aws_s3.GetObject,
                                aws_s3.GetObjectVersion,
                                aws_s3.DeleteObject
                            ],
                            Resource=[
                                GetAtt(app_bucket, 'Arn'),
                                Join('', [
                                    GetAtt(app_bucket, 'Arn'),
                                    '/*',
                                ])
                            ]
                        )
                    ]
                )
            )
        ]
    )


    github_repo = Parameter(
        "GithubRepoLink",
        Description="Name of the repository you wish to connect to codebuild.",
        Type="String"
    )

    artifact_key = Parameter(
        "ArtifactKey",
        Description="The key for the artifact that codebuild creates.",
        Type="String"
    )

    t.add_parameter(github_repo)
    t.add_parameter(artifact_key)


    # CodeBuild project: GitHub (OAuth) source with webhook trigger,
    # zipped artifact written to the app bucket.
    cms_code_build_project = codebuild.Project(
        "CMSBuild",
        t,
        Name="CMS-Build",
        Artifacts=codebuild.Artifacts(
            Location=Ref(app_bucket),
            Name=Ref(artifact_key),
            NamespaceType="BUILD_ID",
            Type="S3",
            Packaging="ZIP"
        ),
        Description="Code build for CMS",
        Environment=codebuild.Environment(
            ComputeType="BUILD_GENERAL1_SMALL",
            Image="aws/codebuild/python:3.6.5",
            Type="LINUX_CONTAINER",
        ),
        ServiceRole=GetAtt(codebuild_service_role, 'Arn'),
        Source=codebuild.Source(
            "CMSSourceCode",
            Auth=codebuild.SourceAuth(
                "GitHubAuth",
                Type="OAUTH"
            ),
            Location=Ref(github_repo),
            Type="GITHUB"
        ),
        Triggers=codebuild.ProjectTriggers(
            Webhook=True
        )
    )


    # CodeDeploy service role: logs, artifact-bucket S3 access, and broad
    # autoscaling/ELB permissions for blue-green style deployments.
    codedeploy_service_role = iam.Role(
        "CMSDeploymentGroupServiceRole",
        t,
        AssumeRolePolicyDocument={
            'Statement': [{
                'Effect': 'Allow',
                'Principal': {
                    'Service': ['codedeploy.amazonaws.com']
                },
                'Action': ['sts:AssumeRole']
            }]
        },
        Policies=[
            iam.Policy(
                PolicyName="CloudWatchLogsPolicy",
                PolicyDocument=aws.Policy(
                    Version="2012-10-17",
                    Statement=[
                        aws.Statement(
                            Sid='logs',
                            Effect=aws.Allow,
                            Action=[
                                aws_logs.CreateLogGroup,
                                aws_logs.CreateLogStream,
                                aws_logs.PutLogEvents
                            ],
                            Resource=['*']
                        )
                    ]
                )
            ),
            iam.Policy(
                PolicyName="s3AccessPolicy",
                PolicyDocument=aws.Policy(
                    Version="2012-10-17",
                    Statement=[
                        aws.Statement(
                            Sid='codebuilder',
                            Effect=aws.Allow,
                            Action=[
                                aws_s3.PutObject,
                                aws_s3.GetObject,
                                aws_s3.GetObjectVersion,
                                aws_s3.DeleteObject
                            ],
                            Resource=[
                                GetAtt(app_bucket, 'Arn'),
                                Join('', [
                                    GetAtt(app_bucket, 'Arn'),
                                    '/*'
                                ])
                            ]
                        )
                    ]
                )
            ),
            iam.Policy(
                PolicyName="autoscalingAccess",
                PolicyDocument=aws.Policy(
                    Version="2012-10-17",
                    Statement=[
                        aws.Statement(
                            Sid='codebuilder',
                            Effect=aws.Allow,
                            Action=[
                                aws.Action('autoscaling', '*'),
                                aws.Action('elasticloadbalancing', '*')
                            ],
                            Resource=[
                                '*'
                            ]
                        )
                    ]
                )
            )
        ]
    )

    cms_codedeploy_application = codedeploy.Application(
        "CMSCodeDeployApplication",
        t,
    )


    # Deployment group targeting the web ASG through the ALB target group.
    cms_deployment_group = codedeploy.DeploymentGroup(
        "CMSDeploymentGroup",
        t,
        DependsOn=[cms_codedeploy_application],
        ApplicationName=Ref(cms_codedeploy_application),
        AutoScalingGroups=[Ref(Web_autoscaler)],
        LoadBalancerInfo=codedeploy.LoadBalancerInfo(
            "CodeDeployLBInfo",
            TargetGroupInfoList=[
                    codedeploy.TargetGroupInfoList(
                        "WebTargetGroup",
                       Name=GetAtt(Web_target_group, "TargetGroupName")
                    )
            ]
        ),
        ServiceRoleArn=GetAtt(codedeploy_service_role, 'Arn')
    )

    print(t.to_yaml())
def main():
    """Build the nginx auto-scaling CloudFormation template.

    Declares parameters for instance/network settings, a shared security
    group, an internet-facing Application Load Balancer with a target
    group and HTTP listener, a LaunchConfiguration whose user data
    installs nginx, and an AutoScalingGroup spanning two private subnets.
    The rendered template is written to 'AutoScaling.yaml'.
    """
    # Initialize template
    template = Template()
    template.set_version("2010-09-09")
    template.set_description("""\
    Configures autoscaling group for nginx app""")

    # Collect template properties through parameters
    InstanceType = template.add_parameter(
        Parameter(
            "InstanceType",
            Type="String",
            Description="WebServer EC2 instance type",
            Default="t2.small",
            AllowedValues=[
                "t2.micro", "t2.small", "t2.medium", "t2.large", "t2.xlarge"
            ],
            ConstraintDescription="Must be a valid EC2 instance type.",
        ))

    # NOTE(review): plain String type; "AWS::EC2::KeyPair::KeyName" would
    # validate the key exists — confirm before changing the interface.
    KeyName = template.add_parameter(
        Parameter(
            "KeyName",
            Type="String",
            Description="Name of an existing EC2 KeyPair to enable SSH access",
            MinLength="1",
            AllowedPattern="[\x20-\x7E]*",
            MaxLength="255",
            ConstraintDescription="Can contain only ASCII characters.",
        ))

    # Used for both DesiredCapacity and MaxSize of the ASG below.
    ScaleCapacity = template.add_parameter(
        Parameter(
            "ScaleCapacity",
            Default="1",
            Type="String",
            Description="Number of nginx servers to run",
        ))

    PublicSubnet1 = template.add_parameter(
        Parameter(
            "PublicSubnet1",
            Type="String",
            Description=
            "A public VPC subnet ID for the nginx app load balancer.",
        ))

    PublicSubnet2 = template.add_parameter(
        Parameter(
            "PublicSubnet2",
            Type="String",
            Description="A public VPC subnet ID for the nginx load balancer.",
        ))

    VPCAvailabilityZone2 = template.add_parameter(
        Parameter(
            "VPCAvailabilityZone2",
            MinLength="1",
            Type="String",
            Description="Second availability zone",
            MaxLength="255",
        ))

    VPCAvailabilityZone1 = template.add_parameter(
        Parameter(
            "VPCAvailabilityZone1",
            MinLength="1",
            Type="String",
            Description="First availability zone",
            MaxLength="255",
        ))

    PrivateSubnet2 = template.add_parameter(
        Parameter(
            "PrivateSubnet2",
            Type="String",
            Description="Second private VPC subnet ID for the nginx app.",
        ))

    PrivateSubnet1 = template.add_parameter(
        Parameter(
            "PrivateSubnet1",
            Type="String",
            Description="First private VPC subnet ID for the nginx app.",
        ))

    VpcId = template.add_parameter(
        Parameter(
            "VpcId",
            Type="String",
            Description="VPC Id.",
        ))

    # as of now only provide centos based ami id
    AmiId = template.add_parameter(
        Parameter(
            "AmiId",
            Type="String",
            Description="AMI Id.",
        ))

    # Create a common security group.  It is attached to both the load
    # balancer and the instances: SSH only from 10.0.0.0/8, HTTP open to
    # the world.
    NginxInstanceSG = template.add_resource(
        ec2.SecurityGroup(
            "InstanceSecurityGroup",
            VpcId=Ref(VpcId),
            GroupDescription="Enable SSH and HTTP access on the inbound port",
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="22",
                    ToPort="22",
                    CidrIp="10.0.0.0/8",
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="80",
                    ToPort="80",
                    CidrIp="0.0.0.0/0",
                )
            ]))

    # Add the application LB (internet-facing, across both public subnets)
    ApplicationElasticLB = template.add_resource(
        elb.LoadBalancer("ApplicationElasticLB",
                         Name="ApplicationElasticLB",
                         Scheme="internet-facing",
                         Subnets=[Ref(PublicSubnet1),
                                  Ref(PublicSubnet2)],
                         SecurityGroups=[Ref(NginxInstanceSG)]))

    # Add Target Group for the ALB
    # NOTE(review): numeric properties are passed as strings; troposphere /
    # CloudFormation appear to coerce them — confirm with the library docs.
    TargetGroupNginx = template.add_resource(
        elb.TargetGroup(
            "TargetGroupNginx",
            VpcId=Ref(VpcId),
            HealthCheckIntervalSeconds="30",
            HealthCheckProtocol="HTTP",
            HealthCheckTimeoutSeconds="10",
            HealthyThresholdCount="4",
            Matcher=elb.Matcher(HttpCode="200"),
            Name="NginxTarget",
            Port="80",
            Protocol="HTTP",
            UnhealthyThresholdCount="3",
        ))

    # Add ALB listener forwarding HTTP/80 to the nginx target group
    Listener = template.add_resource(
        elb.Listener("Listener",
                     Port="80",
                     Protocol="HTTP",
                     LoadBalancerArn=Ref(ApplicationElasticLB),
                     DefaultActions=[
                         elb.Action(Type="forward",
                                    TargetGroupArn=Ref(TargetGroupNginx))
                     ]))

    # Add launch configuration for auto scaling; the user-data script
    # installs and starts nginx on first boot.
    LaunchConfig = template.add_resource(
        LaunchConfiguration(
            "LaunchConfiguration",
            ImageId=Ref(AmiId),
            KeyName=Ref(KeyName),
            AssociatePublicIpAddress="False",
            LaunchConfigurationName="nginx-LC",
            UserData=Base64(
                Join('', [
                    "#!/bin/bash\n", "yum update\n", "yum -y install nginx\n",
                    "chkconfig nginx on\n", "service nginx start"
                ])),
            SecurityGroups=[Ref(NginxInstanceSG)],
            InstanceType=Ref(InstanceType)))

    # Add auto scaling group: desired == max == ScaleCapacity, registered
    # with the ALB target group, instances in the private subnets.
    AutoscalingGroup = template.add_resource(
        AutoScalingGroup(
            "AutoscalingGroup",
            DesiredCapacity=Ref(ScaleCapacity),
            LaunchConfigurationName=Ref(LaunchConfig),
            MinSize="1",
            TargetGroupARNs=[Ref(TargetGroupNginx)],
            MaxSize=Ref(ScaleCapacity),
            VPCZoneIdentifier=[Ref(PrivateSubnet1),
                               Ref(PrivateSubnet2)],
            AvailabilityZones=[
                Ref(VPCAvailabilityZone1),
                Ref(VPCAvailabilityZone2)
            ],
            HealthCheckType="EC2"))

    # Expose the load balancer URL as a stack output.
    template.add_output(
        Output("URL",
               Description="URL of the sample website",
               Value=Join("",
                          ["http://",
                           GetAtt(ApplicationElasticLB, "DNSName")])))

    #print(template.to_json())

    with open('AutoScaling.yaml', 'w') as f:
        f.write(template.to_yaml())
Example #25
0
        ),
        ConnectionDrainingPolicy=elb.ConnectionDrainingPolicy(
            Enabled=True,
            Timeout=10,
        ),
        CrossZone=True,
        Subnets=Ref("PublicSubnet"),
        SecurityGroups=[Ref("LoadBalancerSecurityGroup")],
    ))

# UserData bootstrap script: installs git/ruby and the AWS CodeDeploy agent,
# installs ansible via pip, runs an initial ansible-pull (AnsiblePullCmd is
# built elsewhere in this file), and schedules it every 10 minutes.
ud = Base64(
    Join('\n', [
        "#!/bin/bash", "yum install --enablerepo=epel -y git",
        "yum install ruby -y",
        "wget https://aws-codedeploy-us-west-2.s3.amazonaws.com/latest/install",
        "chmod +x ./install", "./install auto",
        "#yum install java-1.8.0-openjdk -y",
        "#echo 2 |/usr/sbin/alternatives --config java", "pip install ansible",
        AnsiblePullCmd,
        # BUG FIX: files in /etc/cron.d use the system crontab format, which
        # requires a user field between the schedule and the command
        # (crontab(5)); without it cron rejects the entry.
        "echo '*/10 * * * * root {}' > /etc/cron.d/ansible-pull".format(
            AnsiblePullCmd)
    ]))

# IAM role whose trust policy lets EC2 instances (ec2.amazonaws.com) assume
# it; permissions are attached to this role elsewhere in the template.
t.add_resource(
    Role("Role",
         AssumeRolePolicyDocument=Policy(Statement=[
             Statement(Effect=Allow,
                       Action=[AssumeRole],
                       Principal=Principal("Service", ["ec2.amazonaws.com"]))
         ])))

t.add_resource(
             Ref(instanceSecurityGroup)],
         AssociatePublicIpAddress='true',
         DeviceIndex='0',
         DeleteOnTermination='true',
         SubnetId=Ref(subnet))],
 UserData=Base64(
     Join(
         '',
         [
             '#!/bin/bash -xe\n',
             'yum update -y aws-cfn-bootstrap\n',
             '/opt/aws/bin/cfn-init -v ',
             '         --stack ',
             Ref('AWS::StackName'),
             '         --resource WebServerInstance ',
             '         --region ',
             Ref('AWS::Region'),
             '\n',
             '/opt/aws/bin/cfn-signal -e $? ',
             '         --stack ',
             Ref('AWS::StackName'),
             '         --resource WebServerInstance ',
             '         --region ',
             Ref('AWS::Region'),
             '\n',
         ])),
 CreationPolicy=CreationPolicy(
     ResourceSignal=ResourceSignal(
         Timeout='PT15M')),
 Tags=Tags(
     Application=ref_stack_id),
Example #27
0
                FromPort="22",
                ToPort="22",
                CidrIp=PublicCidrIp,
            ),
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort=ApplicationPort,
                ToPort=ApplicationPort,
                CidrIp="0.0.0.0/0",
            ),
        ],
    ))

# UserData shell script: installs git (from EPEL) and ansible, then runs the
# ansible-pull command built elsewhere in this file as AnsiblePullCmd.
ud = Base64(
    Join('\n', [
        "#!/bin/bash", "sudo yum install --enablerepo=epel -y git",
        "pip install ansible", AnsiblePullCmd
    ]))

# IAM role assumable by the EC2 service; the "AllowCodePipeline" policy
# added below attaches to it.
t.add_resource(
    Role("Role",
         AssumeRolePolicyDocument=Policy(Statement=[
             Statement(Effect=Allow,
                       Action=[AssumeRole],
                       Principal=Principal("Service", ["ec2.amazonaws.com"]))
         ])))

t.add_resource(
    IAMPolicy("Policy",
              PolicyName="AllowCodePipeline",
              PolicyDocument=Policy(Statement=[
Example #28
0
    def create_auto_scaling_resources(self, worker_security_group, worker_lb):
        """Add the worker tier's scaling resources to the stack.

        Creates a LaunchConfiguration and an AutoScalingGroup wired to the
        given security group and load balancer, plus two ScheduledActions
        that change desired capacity on the configured start/end schedules.
        """
        # Launch configuration describing each worker instance.
        launch_config = self.add_resource(
            asg.LaunchConfiguration(
                'lcWorker',
                EbsOptimized=True,
                ImageId=Ref(self.worker_ami),
                IamInstanceProfile=Ref(self.worker_instance_profile),
                InstanceType=Ref(self.worker_instance_type),
                KeyName=Ref(self.keyname),
                SecurityGroups=[Ref(worker_security_group)],
                UserData=Base64(Join('', self.get_cloud_config())),
            )
        )

        # Auto scaling group across the private subnets; instance health is
        # judged by the ELB after a 600s grace period, and lifecycle events
        # are published to the configured notification topic.
        scaling_group = self.add_resource(
            asg.AutoScalingGroup(
                'asgWorker',
                AvailabilityZones=Ref(self.availability_zones),
                Cooldown=300,
                DesiredCapacity=Ref(self.worker_auto_scaling_desired),
                HealthCheckGracePeriod=600,
                HealthCheckType='ELB',
                LaunchConfigurationName=Ref(launch_config),
                LoadBalancerNames=[Ref(worker_lb)],
                MaxSize=Ref(self.worker_auto_scaling_max),
                MinSize=Ref(self.worker_auto_scaling_min),
                NotificationConfigurations=[
                    asg.NotificationConfigurations(
                        TopicARN=Ref(self.notification_topic_arn),
                        NotificationTypes=[
                            asg.EC2_INSTANCE_LAUNCH,
                            asg.EC2_INSTANCE_LAUNCH_ERROR,
                            asg.EC2_INSTANCE_TERMINATE,
                            asg.EC2_INSTANCE_TERMINATE_ERROR,
                        ],
                    )
                ],
                VPCZoneIdentifier=Ref(self.private_subnets),
                Tags=[asg.Tag('Name', 'Worker', True)],
            )
        )

        # Scheduled desired-capacity changes for the start and the end of
        # the configured recurrence windows.
        schedules = (
            ('schedWorkerAutoScalingStart',
             self.worker_auto_scaling_schedule_start_capacity,
             self.worker_auto_scaling_schedule_start_recurrence),
            ('schedWorkerAutoScalingEnd',
             self.worker_auto_scaling_schedule_end_capacity,
             self.worker_auto_scaling_schedule_end_recurrence),
        )
        for title, capacity_param, recurrence_param in schedules:
            self.add_resource(
                asg.ScheduledAction(
                    title,
                    AutoScalingGroupName=Ref(scaling_group),
                    DesiredCapacity=Ref(capacity_param),
                    Recurrence=Ref(recurrence_param),
                )
            )
Example #29
0
        GroupDescription="Allow SSH and TCP/{} access".format(ApplicationPort),
        SecurityGroupIngress=[
            ec2.SecurityGroupRule(IpProtocol="tcp",
                                  FromPort="22",
                                  ToPort="22",
                                  CidrIp="0.0.0.0/0"),
            ec2.SecurityGroupRule(IpProtocol="tcp",
                                  FromPort=ApplicationPort,
                                  ToPort=ApplicationPort,
                                  CidrIp="0.0.0.0/0")
        ]))

# UserData: installs nodejs (from EPEL), downloads the helloworld app and an
# /etc/init job for it (bit.ly shortlinks — NOTE(review): opaque URLs, verify
# their targets), then starts it with upstart's `start` command.
ud = Base64(
    Join('\n', [
        "#!/bin/bash", "sudo yum install --enablerepo=epel -y nodejs",
        "wget http://bit.ly/2vESNuc -O /home/ec2-user/helloworld.js",
        "wget http://bit.ly/2vVvT18 -O /etc/init/helloworld.conf",
        "start helloworld"
    ]))

# EC2 instance running the helloworld app.  Security group and key pair are
# referenced by logical name, so resources/parameters with those names must
# exist elsewhere in this template.
t.add_resource(
    ec2.Instance("instance",
                 ImageId="ami-bf4193c7",  # hard-coded, region-specific AMI
                 InstanceType="t2.micro",
                 SecurityGroups=[Ref("SecurityGroup")],
                 KeyName=Ref("KeyPair"),
                 UserData=ud))

t.add_output(
    Output("InstancePublicIp",
           Description="Public Ip of our instance.",
Example #30
0
        ),
        ec2.SecurityGroupRule(
            IpProtocol="tcp",
            FromPort=ApplicationPort,
            ToPort=ApplicationPort,
            CidrIp="0.0.0.0/0",
        ),
    ],
))

# UserData for the helloworld node: sets the hostname, installs build tools
# and nodejs, downloads the app plus its systemd unit, starts the service,
# and reboots last so the new hostname takes effect everywhere.
ud = Base64(Join('\n', [
    "#!/bin/bash",
    "sudo hostnamectl set-hostname test.localdomain",
    # BUG FIX: the original ran `sudo reboot` here, which terminated the
    # first-boot user-data script before any of the following commands
    # could run; the reboot is now the final step.
    "sudo yum -y install gcc-c++ make",  # BUG FIX: `install` was missing
    "curl -sL https://rpm.nodesource.com/setup_11.x | sudo -E bash -",
    "sudo yum -y install nodejs",
    "sudo yum -y install wget",
    "wget https://raw.githubusercontent.com/szhouchoice/EffectiveDevOpsTemplates/master/helloworld.js -O /home/centos/helloworld.js",
    "wget https://raw.githubusercontent.com/szhouchoice/EffectiveDevOpsTemplates/master/helloworld.service -O /lib/systemd/system/helloworld.service",
    "service helloworld start ",
    "sudo reboot"
]))

# EC2 instance that runs the user-data script defined above in `ud`.
# "SecurityGroup" and "KeyPair" are referenced by logical name and must be
# defined elsewhere in this template.
t.add_resource(ec2.Instance(
    "instance",
    ImageId="ami-03e057d80a32a76ab",  # hard-coded, region-specific AMI
    InstanceType="t2.micro",
    SecurityGroups=[Ref("SecurityGroup")],
    KeyName=Ref("KeyPair"),
    UserData=ud,
))