GatewayId=Ref(gateway))) # # Security Group Resources # nat_security_group_name = 'sgNAT' nat_security_group = t.add_resource( ec2.SecurityGroup(nat_security_group_name, GroupDescription='Enables access to the NAT devices', VpcId=Ref(vpc), SecurityGroupIngress=[ ec2.SecurityGroupRule(IpProtocol='tcp', CidrIp=ALLOW_ALL_CIDR, FromPort=p, ToPort=p) for p in [HTTP, HTTPS] ], SecurityGroupEgress=[ ec2.SecurityGroupRule(IpProtocol='tcp', CidrIp=ALLOW_ALL_CIDR, FromPort=p, ToPort=p) for p in [HTTP, HTTPS] ], Tags=Tags(Name=nat_security_group_name))) bastion_security_group_name = 'sgBastion' bastion_security_group = t.add_resource( ec2.SecurityGroup(bastion_security_group_name, GroupDescription='Enables access to the BastionHost', VpcId=Ref(vpc), SecurityGroupIngress=[
def main():
    """Build and print a CloudFormation template for an ElastiCache Redis
    cluster fronted by a single PHP web server EC2 instance.

    The instance is bootstrapped via cfn-init (httpd + phpredis build) and
    signals completion with cfn-signal.  The rendered template is written
    to stdout as JSON.
    """
    template = Template()

    # Description
    # NOTE(review): 'showIing' is a typo in the emitted description text;
    # left untouched here because the description is runtime output.
    template.add_description(
        'AWS CloudFormation Sample Template ElastiCache_Redis:'
        'Sample template showIing how to create an Amazon'
        'ElastiCache Redis Cluster. **WARNING** This template'
        'creates an Amazon EC2 Instance and an Amazon ElastiCache'
        'Cluster. You will be billed for the AWS resources used'
        'if you create a stack from this template.')

    # Mappings
    # Instance type -> virtualization architecture (paravirtual vs HVM).
    template.add_mapping(
        'AWSInstanceType2Arch', {
            't1.micro': {'Arch': 'PV64'}, 't2.micro': {'Arch': 'HVM64'},
            't2.small': {'Arch': 'HVM64'}, 't2.medium': {'Arch': 'HVM64'},
            'm1.small': {'Arch': 'PV64'}, 'm1.medium': {'Arch': 'PV64'},
            'm1.large': {'Arch': 'PV64'}, 'm1.xlarge': {'Arch': 'PV64'},
            'm2.xlarge': {'Arch': 'PV64'}, 'm2.2xlarge': {'Arch': 'PV64'},
            'm2.4xlarge': {'Arch': 'PV64'}, 'm3.medium': {'Arch': 'HVM64'},
            'm3.large': {'Arch': 'HVM64'}, 'm3.xlarge': {'Arch': 'HVM64'},
            'm3.2xlarge': {'Arch': 'HVM64'}, 'c1.medium': {'Arch': 'PV64'},
            'c1.xlarge': {'Arch': 'PV64'}, 'c3.large': {'Arch': 'HVM64'},
            'c3.xlarge': {'Arch': 'HVM64'}, 'c3.2xlarge': {'Arch': 'HVM64'},
            'c3.4xlarge': {'Arch': 'HVM64'}, 'c3.8xlarge': {'Arch': 'HVM64'},
            'c4.large': {'Arch': 'HVM64'}, 'c4.xlarge': {'Arch': 'HVM64'},
            'c4.2xlarge': {'Arch': 'HVM64'}, 'c4.4xlarge': {'Arch': 'HVM64'},
            'c4.8xlarge': {'Arch': 'HVM64'}, 'g2.2xlarge': {'Arch': 'HVMG2'},
            'r3.large': {'Arch': 'HVM64'}, 'r3.xlarge': {'Arch': 'HVM64'},
            'r3.2xlarge': {'Arch': 'HVM64'}, 'r3.4xlarge': {'Arch': 'HVM64'},
            'r3.8xlarge': {'Arch': 'HVM64'}, 'i2.xlarge': {'Arch': 'HVM64'},
            'i2.2xlarge': {'Arch': 'HVM64'}, 'i2.4xlarge': {'Arch': 'HVM64'},
            'i2.8xlarge': {'Arch': 'HVM64'}, 'd2.xlarge': {'Arch': 'HVM64'},
            'd2.2xlarge': {'Arch': 'HVM64'}, 'd2.4xlarge': {'Arch': 'HVM64'},
            'd2.8xlarge': {'Arch': 'HVM64'}, 'hi1.4xlarge': {'Arch': 'HVM64'},
            'hs1.8xlarge': {'Arch': 'HVM64'}, 'cr1.8xlarge': {'Arch': 'HVM64'},
            'cc2.8xlarge': {'Arch': 'HVM64'}
        })

    # Region + architecture -> AMI id.  NOTE(review): these AMI ids are
    # dated sample values; confirm they still exist before deploying.
    template.add_mapping(
        'AWSRegionArch2AMI', {
            'us-east-1': {'PV64': 'ami-0f4cfd64', 'HVM64': 'ami-0d4cfd66',
                          'HVMG2': 'ami-5b05ba30'},
            'us-west-2': {'PV64': 'ami-d3c5d1e3', 'HVM64': 'ami-d5c5d1e5',
                          'HVMG2': 'ami-a9d6c099'},
            'us-west-1': {'PV64': 'ami-85ea13c1', 'HVM64': 'ami-87ea13c3',
                          'HVMG2': 'ami-37827a73'},
            'eu-west-1': {'PV64': 'ami-d6d18ea1', 'HVM64': 'ami-e4d18e93',
                          'HVMG2': 'ami-72a9f105'},
            'eu-central-1': {'PV64': 'ami-a4b0b7b9', 'HVM64': 'ami-a6b0b7bb',
                             'HVMG2': 'ami-a6c9cfbb'},
            'ap-northeast-1': {'PV64': 'ami-1a1b9f1a', 'HVM64': 'ami-1c1b9f1c',
                               'HVMG2': 'ami-f644c4f6'},
            'ap-southeast-1': {'PV64': 'ami-d24b4280', 'HVM64': 'ami-d44b4286',
                               'HVMG2': 'ami-12b5bc40'},
            'ap-southeast-2': {'PV64': 'ami-ef7b39d5', 'HVM64': 'ami-db7b39e1',
                               'HVMG2': 'ami-b3337e89'},
            'sa-east-1': {'PV64': 'ami-5b098146', 'HVM64': 'ami-55098148',
                          'HVMG2': 'NOT_SUPPORTED'},
            'cn-north-1': {'PV64': 'ami-bec45887', 'HVM64': 'ami-bcc45885',
                           'HVMG2': 'NOT_SUPPORTED'}
        })

    # Region -> IAM service principals (China partition uses *.com.cn).
    template.add_mapping(
        'Region2Principal', {
            'us-east-1': {'EC2Principal': 'ec2.amazonaws.com',
                          'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
            'us-west-2': {'EC2Principal': 'ec2.amazonaws.com',
                          'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
            'us-west-1': {'EC2Principal': 'ec2.amazonaws.com',
                          'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
            'eu-west-1': {'EC2Principal': 'ec2.amazonaws.com',
                          'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
            'ap-southeast-1': {'EC2Principal': 'ec2.amazonaws.com',
                               'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
            'ap-northeast-1': {'EC2Principal': 'ec2.amazonaws.com',
                               'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
            'ap-southeast-2': {'EC2Principal': 'ec2.amazonaws.com',
                               'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
            'sa-east-1': {'EC2Principal': 'ec2.amazonaws.com',
                          'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
            'cn-north-1': {'EC2Principal': 'ec2.amazonaws.com.cn',
                           'OpsWorksPrincipal': 'opsworks.amazonaws.com.cn'},
            'eu-central-1': {'EC2Principal': 'ec2.amazonaws.com',
                             'OpsWorksPrincipal': 'opsworks.amazonaws.com'}
        })

    # Parameters
    cachenodetype = template.add_parameter(
        Parameter(
            'ClusterNodeType',
            Description=
            'The compute and memory capacity of the nodes in the Redis'
            ' Cluster',
            Type='String',
            Default='cache.m1.small',
            AllowedValues=[
                'cache.m1.small', 'cache.m1.large', 'cache.m1.xlarge',
                'cache.m2.xlarge', 'cache.m2.2xlarge', 'cache.m2.4xlarge',
                'cache.c1.xlarge'
            ],
            ConstraintDescription='must select a valid Cache Node type.',
        ))
    instancetype = template.add_parameter(
        Parameter(
            'InstanceType',
            Description='WebServer EC2 instance type',
            Type='String',
            Default='t2.micro',
            AllowedValues=[
                't1.micro', 't2.micro', 't2.small', 't2.medium', 'm1.small',
                'm1.medium', 'm1.large', 'm1.xlarge', 'm2.xlarge',
                'm2.2xlarge', 'm2.4xlarge', 'm3.medium', 'm3.large',
                'm3.xlarge', 'm3.2xlarge', 'c1.medium', 'c1.xlarge',
                'c3.large', 'c3.xlarge', 'c3.2xlarge', 'c3.4xlarge',
                'c3.8xlarge', 'c4.large', 'c4.xlarge', 'c4.2xlarge',
                'c4.4xlarge', 'c4.8xlarge', 'g2.2xlarge', 'r3.large',
                'r3.xlarge', 'r3.2xlarge', 'r3.4xlarge', 'r3.8xlarge',
                'i2.xlarge', 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge',
                'd2.xlarge', 'd2.2xlarge', 'd2.4xlarge', 'd2.8xlarge',
                'hi1.4xlarge', 'hs1.8xlarge', 'cr1.8xlarge', 'cc2.8xlarge',
                'cg1.4xlarge'
            ],
            ConstraintDescription='must be a valid EC2 instance type.',
        ))
    keyname = template.add_parameter(
        Parameter(
            'KeyName',
            Description='Name of an existing EC2 KeyPair to enable SSH access'
            ' to the instance',
            Type='AWS::EC2::KeyPair::KeyName',
            ConstraintDescription=
            'must be the name of an existing EC2 KeyPair.',
        ))
    sshlocation = template.add_parameter(
        Parameter('SSHLocation',
                  Description='The IP address range that can be used to SSH to'
                  ' the EC2 instances',
                  Type='String',
                  MinLength='9',
                  MaxLength='18',
                  Default='0.0.0.0/0',
                  # Loose dotted-quad/CIDR pattern (does not range-check octets).
                  AllowedPattern='(\\d{1,3})\\.(\\d{1,3})\\.'
                  '(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})',
                  ConstraintDescription='must be a valid IP CIDR range of the'
                  ' form x.x.x.x/x.'))

    # Resources
    # Role assumable by EC2 (principal looked up per region).
    # NOTE(review): bare Policy/Statement/Allow/AssumeRole/Principal are
    # presumably imported from awacs at the top of the file — confirm.
    webserverrole = template.add_resource(
        iam.Role(
            'WebServerRole',
            AssumeRolePolicyDocument=Policy(Statement=[
                Statement(
                    Effect=Allow,
                    Action=[AssumeRole],
                    Principal=Principal('Service', [
                        FindInMap('Region2Principal', Ref('AWS::Region'),
                                  'EC2Principal')
                    ]),
                )
            ]),
            Path='/',
        ))

    # Inline policy so the instance can discover the cache cluster endpoint.
    template.add_resource(
        iam.PolicyType(
            'WebServerRolePolicy',
            PolicyName='WebServerRole',
            PolicyDocument=awacs.aws.Policy(Statement=[
                awacs.aws.Statement(Action=[
                    awacs.aws.Action("elasticache", "DescribeCacheClusters")
                ],
                                    Resource=["*"],
                                    Effect=awacs.aws.Allow)
            ]),
            Roles=[Ref(webserverrole)],
        ))

    webserverinstanceprofile = template.add_resource(
        iam.InstanceProfile(
            'WebServerInstanceProfile',
            Path='/',
            Roles=[Ref(webserverrole)],
        ))

    # SSH restricted to the SSHLocation parameter; HTTP open to the world.
    webserversg = template.add_resource(
        ec2.SecurityGroup('WebServerSecurityGroup',
                          GroupDescription='Enable HTTP and SSH access',
                          SecurityGroupIngress=[
                              ec2.SecurityGroupRule(
                                  IpProtocol='tcp',
                                  FromPort='22',
                                  ToPort='22',
                                  CidrIp=Ref(sshlocation),
                              ),
                              ec2.SecurityGroupRule(
                                  IpProtocol='tcp',
                                  FromPort='80',
                                  ToPort='80',
                                  CidrIp='0.0.0.0/0',
                              )
                          ]))

    webserverinstance = template.add_resource(
        ec2.Instance(
            'WebServerInstance',
            # cfn-init metadata: installs httpd/php toolchain, drops the demo
            # PHP page, cron + scripts for cluster-config refresh and phpredis
            # build, and configures cfn-hup for metadata-driven updates.
            Metadata=cloudformation.Metadata(
                cloudformation.Init({
                    'config':
                    cloudformation.InitConfig(
                        packages={
                            'yum': {
                                'httpd': [],
                                'php': [],
                                'php-devel': [],
                                'gcc': [],
                                'make': []
                            }
                        },
                        files=cloudformation.InitFiles({
                            # Demo page: reads the cluster config dumped by
                            # get_cluster_config and round-trips a test key.
                            '/var/www/html/index.php':
                            cloudformation.InitFile(content=Join('', [
                                '<?php\n',
                                'echo \"<h1>AWS CloudFormation sample'
                                ' application for Amazon ElastiCache'
                                ' Redis Cluster</h1>\";\n',
                                '\n',
                                '$cluster_config = json_decode('
                                'file_get_contents(\'/tmp/cacheclusterconfig\''
                                '), true);\n',
                                '$endpoint = $cluster_config[\'CacheClusters'
                                '\'][0][\'CacheNodes\'][0][\'Endpoint\'][\'Add'
                                'ress\'];\n',
                                '$port = $cluster_config[\'CacheClusters\'][0]'
                                '[\'CacheNodes\'][0][\'Endpoint\'][\'Port\'];'
                                '\n',
                                '\n',
                                'echo \"<p>Connecting to Redis Cache Cluster '
                                'node \'{$endpoint}\' on port {$port}</p>\";'
                                '\n',
                                '\n',
                                '$redis=new Redis();\n',
                                '$redis->connect($endpoint, $port);\n',
                                '$redis->set(\'testkey\', \'Hello World!\');'
                                '\n',
                                '$return = $redis->get(\'testkey\');\n',
                                '\n',
                                'echo \"<p>Retrieved value: $return</p>\";'
                                '\n',
                                '?>\n'
                            ]),
                                                     mode='000644',
                                                     owner='apache',
                                                     group='apache'),
                            # Refresh the cached cluster description every 5 min.
                            '/etc/cron.d/get_cluster_config':
                            cloudformation.InitFile(
                                content='*/5 * * * * root'
                                ' /usr/local/bin/get_cluster_config',
                                mode='000644',
                                owner='root',
                                group='root'),
                            '/usr/local/bin/get_cluster_config':
                            cloudformation.InitFile(content=Join(
                                '', [
                                    '#! /bin/bash\n',
                                    'aws elasticache describe-cache-clusters ',
                                    '        --cache-cluster-id ',
                                    Ref('RedisCluster'),
                                    '        --show-cache-node-info'
                                    ' --region ',
                                    Ref('AWS::Region'),
                                    ' > /tmp/cacheclusterconfig\n'
                                ]),
                                                    mode='000755',
                                                    owner='root',
                                                    group='root'),
                            # Build phpredis from source (no yum package).
                            '/usr/local/bin/install_phpredis':
                            cloudformation.InitFile(content=Join(
                                '', [
                                    '#! /bin/bash\n',
                                    'cd /tmp\n',
                                    'wget https://github.com/nicolasff/'
                                    'phpredis/zipball/master -O phpredis.zip'
                                    '\n',
                                    'unzip phpredis.zip\n',
                                    'cd nicolasff-phpredis-*\n',
                                    'phpize\n',
                                    './configure\n',
                                    'make && make install\n',
                                    'touch /etc/php.d/redis.ini\n',
                                    'echo extension=redis.so > /etc/php.d/'
                                    'redis.ini\n'
                                ]),
                                                    mode='000755',
                                                    owner='root',
                                                    group='root'),
                            '/etc/cfn/cfn-hup.conf':
                            cloudformation.InitFile(content=Join(
                                '', [
                                    '[main]\n', 'stack=',
                                    Ref('AWS::StackId'), '\n', 'region=',
                                    Ref('AWS::Region'), '\n'
                                ]),
                                                    mode='000400',
                                                    owner='root',
                                                    group='root'),
                            # Re-run cfn-init when this resource's metadata changes.
                            '/etc/cfn/hooks.d/cfn-auto-reloader.conf':
                            cloudformation.InitFile(content=Join('', [
                                '[cfn-auto-reloader-hook]\n',
                                'triggers=post.update\n',
                                'path=Resources.WebServerInstance.Metadata'
                                '.AWS::CloudFormation::Init\n',
                                'action=/opt/aws/bin/cfn-init -v ',
                                '         --stack ',
                                Ref('AWS::StackName'),
                                '         --resource WebServerInstance ',
                                '         --region ',
                                Ref('AWS::Region'),
                                '\n',
                                'runas=root\n'
                            ]),
                                # Why doesn't the Amazon template have this?
                                # mode='000400',
                                # owner='root',
                                # group='root'
                            ),
                        }),
                        commands={
                            '01-install_phpredis': {
                                'command': '/usr/local/bin/install_phpredis'
                            },
                            '02-get-cluster-config': {
                                'command': '/usr/local/bin/get_cluster_config'
                            }
                        },
                        services={
                            "sysvinit":
                            cloudformation.InitServices({
                                "httpd":
                                cloudformation.InitService(
                                    enabled=True,
                                    ensureRunning=True,
                                ),
                                "cfn-hup":
                                cloudformation.InitService(
                                    enabled=True,
                                    ensureRunning=True,
                                    files=[
                                        '/etc/cfn/cfn-hup.conf',
                                        '/etc/cfn/hooks.d/'
                                        'cfn-auto-reloader.conf'
                                    ]),
                            }),
                        },
                    )
                })),
            ImageId=FindInMap(
                'AWSRegionArch2AMI', Ref('AWS::Region'),
                FindInMap('AWSInstanceType2Arch', Ref(instancetype), 'Arch')),
            InstanceType=Ref(instancetype),
            SecurityGroups=[Ref(webserversg)],
            KeyName=Ref(keyname),
            IamInstanceProfile=Ref(webserverinstanceprofile),
            # Boot script: run cfn-init, then report success/failure so the
            # CreationPolicy below can gate stack completion.
            UserData=Base64(
                Join('', [
                    '#!/bin/bash -xe\n',
                    'yum update -y aws-cfn-bootstrap\n',
                    '# Setup the PHP sample application\n',
                    '/opt/aws/bin/cfn-init -v ',
                    '         --stack ',
                    Ref('AWS::StackName'),
                    '         --resource WebServerInstance ',
                    '         --region ',
                    Ref('AWS::Region'),
                    '\n',
                    '# Signal the status of cfn-init\n',
                    '/opt/aws/bin/cfn-signal -e $? ',
                    '         --stack ',
                    Ref('AWS::StackName'),
                    '         --resource WebServerInstance ',
                    '         --region ',
                    Ref('AWS::Region'),
                    '\n'
                ])),
            # Wait up to 15 minutes for the cfn-signal above.
            CreationPolicy=CreationPolicy(ResourceSignal=ResourceSignal(
                Timeout='PT15M')),
            # NOTE(review): 'Troposhpere' is a typo in the emitted tag value;
            # left untouched because tags are runtime output.
            Tags=Tags(Application=Ref('AWS::StackId'),
                      Details='Created using Troposhpere')))

    # ElastiCache security group, opened only to the web server's EC2 SG.
    redisclustersg = template.add_resource(
        elasticache.SecurityGroup(
            'RedisClusterSecurityGroup',
            Description='Lock the cluster down',
        ))
    template.add_resource(
        elasticache.SecurityGroupIngress(
            'RedisClusterSecurityGroupIngress',
            CacheSecurityGroupName=Ref(redisclustersg),
            EC2SecurityGroupName=Ref(webserversg),
        ))
    template.add_resource(
        elasticache.CacheCluster(
            'RedisCluster',
            Engine='redis',
            CacheNodeType=Ref(cachenodetype),
            NumCacheNodes='1',
            CacheSecurityGroupNames=[Ref(redisclustersg)],
        ))

    # Outputs
    template.add_output([
        Output('WebsiteURL',
               Description='Application URL',
               Value=Join('', [
                   'http://',
                   GetAtt(webserverinstance, 'PublicDnsName'),
               ]))
    ])

    # Print CloudFormation Template
    print(template.to_json())
def buildMySQL(t, args):
    """Add the MySQL (RDS) tier to template *t* and return it.

    Creates the DB security group (reachable only from the application
    security group), a subnet group spanning both private subnets, and
    one RDS instance — restored from a snapshot when ``args.recovery``
    is set, freshly created otherwise.  ``args.dev`` relaxes durability:
    no Multi-AZ, and 'Delete' instead of 'Snapshot' on teardown.
    """
    # DB security group: no CIDR ingress of its own; access is granted via
    # the separate ingress resource below.
    t.add_resource(
        ec2.SecurityGroup(
            'DBSecurityGroup',
            GroupDescription='Patient Records',
            VpcId=Ref('VPC'),
            Tags=Tags(Name='MySQL Access')
        )
    )
    # Allow all protocols/ports from the application tier's security group.
    t.add_resource(
        ec2.SecurityGroupIngress(
            'DBSGIngress',
            GroupId=Ref('DBSecurityGroup'),
            IpProtocol='-1',
            SourceSecurityGroupId=Ref('ApplicationSecurityGroup')
        )
    )
    t.add_resource(
        rds.DBSubnetGroup(
            'RDSSubnetGroup',
            DBSubnetGroupDescription='MySQL node locations',
            SubnetIds=[Ref('PrivateSubnet1'), Ref('PrivateSubnet2')]
        )
    )
    if (args.recovery):
        # Recovery path: restore from a snapshot ARN supplied as a parameter
        # (DBName/credentials/storage come from the snapshot, not the template).
        t.add_resource(
            rds.DBInstance(
                'RDSInstance',
                DeletionPolicy='Delete' if args.dev else 'Snapshot',
                DBSnapshotIdentifier=Ref('RecoveryRDSSnapshotARN'),
                DBInstanceClass=Ref('RDSInstanceSize'),
                PubliclyAccessible=False,
                DBSubnetGroupName=Ref('RDSSubnetGroup'),
                VPCSecurityGroups=[Ref('DBSecurityGroup')],
                MultiAZ=not args.dev,
                Tags=Tags(Name='Patient Records')
            )
        )
    else:
        # Fresh, encrypted database.
        # NOTE(review): ``ref_region`` and ``OpenEMRKeyID`` are free
        # variables here — presumably module-level globals defined
        # elsewhere in this file; confirm.
        t.add_resource(
            rds.DBInstance(
                'RDSInstance',
                DeletionPolicy='Delete' if args.dev else 'Snapshot',
                DBName='openemr',
                AllocatedStorage=Ref('PatientRecords'),
                DBInstanceClass=Ref('RDSInstanceSize'),
                Engine='MySQL',
                EngineVersion=FindInMap(
                    'RegionData', ref_region, 'MySQLVersion'),
                # NOTE(review): '******' looks like a redacted placeholder,
                # not a valid master username — verify before deploying.
                MasterUsername='******',
                MasterUserPassword=Ref('RDSPassword'),
                PubliclyAccessible=False,
                DBSubnetGroupName=Ref('RDSSubnetGroup'),
                VPCSecurityGroups=[Ref('DBSecurityGroup')],
                KmsKeyId=OpenEMRKeyID,
                StorageEncrypted=True,
                MultiAZ=not args.dev,
                Tags=Tags(Name='Patient Records')
            )
        )
    return t
ec2.SecurityGroup( "SecurityGroup", GroupDescription="Allow SSH and TCP/{} access".format(ApplicationPort), GroupName="FrontServer-SG", VpcId=VpcID, SecurityGroupIngress=[ ec2.SecurityGroupRule( IpProtocol="tcp", FromPort="22", ToPort="22", CidrIp="0.0.0.0/0", ), ec2.SecurityGroupRule( IpProtocol="tcp", FromPort="80", ToPort="80", CidrIp="0.0.0.0/0", ), ec2.SecurityGroupRule( IpProtocol="tcp", FromPort="9000", ToPort="9000", CidrIp="0.0.0.0/0", ), ec2.SecurityGroupRule( IpProtocol="tcp", FromPort="8000", ToPort="8000", CidrIp="0.0.0.0/0", ), ], ))
from troposphere import Ref, Template, Parameter, Output, Join, GetAtt, Base64, FindInMap import troposphere.ec2 as ec2 t = Template() #Securi group sg = ec2.SecurityGroup("LampSg") sg = ec2.SecurityGroup("LampSg") sg.GroupDescription = "Allow access through ports 80 and 22" sg.SecurityGroupIngress = [ ec2.SecurityGroupRule(IpProtocol="tcp", FromPort="80", ToPort="80", CidrIp="0.0.0.0/0"), ec2.SecurityGroupRule(IpProtocol="tcp", FromPort="22", ToPort="22", CidrIp="0.0.0.0/0") ] t.add_resource(sg) keypair = t.add_parameter( Parameter( "KeyName", Description= "Name of the ssh keky pair that will be used to access the instance", Type="String")) t.add_mapping( 'RegionMap', { "us-east-1": { "AMI": "ami-035be7bafff33b6b6" }, "us-west-1": {
def create_security_groups(self):
    """Create the Empire minion security group and the public/private
    application ELB security groups.

    Wires ELB -> cluster ingress on the app and ALB ephemeral port ranges,
    and opens 80/443 on each ELB group.  Exports the group ids as outputs.
    """
    template = self.template

    # Minion cluster security group, exported for other stacks.
    template.add_resource(
        ec2.SecurityGroup(CLUSTER_SG_NAME,
                          GroupDescription="EmpireMinionSecurityGroup",
                          VpcId=Ref("VpcId")))
    template.add_output(Output("SecurityGroup", Value=Ref(CLUSTER_SG_NAME)))

    # Members of the cluster may talk to each other on any port/protocol.
    template.add_resource(
        ec2.SecurityGroupIngress("EmpireMinionAllTCPAccess",
                                 IpProtocol="-1",
                                 FromPort="-1",
                                 ToPort="-1",
                                 SourceSecurityGroupId=Ref(CLUSTER_SG_NAME),
                                 GroupId=Ref(CLUSTER_SG_NAME)))

    # One ELB security group per exposure tier (internal / internet-facing).
    for tier in ("public", "private"):
        scope = tier.capitalize()
        group_name = "Empire%sAppELBSG" % scope

        template.add_resource(
            ec2.SecurityGroup(group_name,
                              GroupDescription=group_name,
                              VpcId=Ref("VpcId"),
                              Tags=Tags(Name="%s-app-elb-sg" % tier)))
        template.add_output(
            Output("%sAppELBSG" % scope, Value=Ref(group_name)))

        # ELB -> cluster: the classic app port range (9000-10000) and the
        # ephemeral range Application Load Balancing picks ports from.
        for low, high in ((9000, 10000), (32768, 61000)):
            template.add_resource(
                ec2.SecurityGroupIngress(
                    "Empire%sAppPort%dTo%d" % (scope, low, high),
                    IpProtocol="tcp",
                    FromPort=low,
                    ToPort=high,
                    SourceSecurityGroupId=Ref(group_name),
                    GroupId=Ref(CLUSTER_SG_NAME)))

        # Open HTTP/HTTPS to the ELB.  For an internal ELB only internal
        # hosts can actually reach it despite the 0.0.0.0/0 rule.
        for port in (80, 443):
            template.add_resource(
                ec2.SecurityGroupIngress(
                    "Empire%sELBAllow%d" % (scope, port),
                    IpProtocol="tcp",
                    FromPort=port,
                    ToPort=port,
                    CidrIp="0.0.0.0/0",
                    GroupId=Ref(group_name)))
def create_security_groups(self):
    """Create the app-server load-balancer SG and the app-server SG.

    Returns:
        tuple: ``(lb_security_group, app_server_security_group)`` — the two
        troposphere resources, already added to the template.
    """
    app_server_lb_security_group_name = 'sgAppServerLoadBalancer'
    # Load balancer: HTTP/HTTPS in from anywhere; HTTP out only to the VPC.
    app_server_lb_security_group = self.add_resource(ec2.SecurityGroup(
        app_server_lb_security_group_name,
        GroupDescription='Enables access to application servers via a '
                         'load balancer',
        VpcId=Ref(self.vpc_id),
        SecurityGroupIngress=[
            ec2.SecurityGroupRule(
                IpProtocol='tcp', CidrIp=ALLOW_ALL_CIDR,
                FromPort=p, ToPort=p
            )
            for p in [HTTP, HTTPS]
        ],
        SecurityGroupEgress=[
            ec2.SecurityGroupRule(
                IpProtocol='tcp', CidrIp=VPC_CIDR,
                FromPort=p, ToPort=p
            )
            for p in [HTTP]
        ],
        Tags=self.get_tags(Name=app_server_lb_security_group_name)
    ))

    app_server_security_group_name = 'sgAppServer'
    app_server_security_group = self.add_resource(ec2.SecurityGroup(
        app_server_security_group_name,
        DependsOn='sgAppServerLoadBalancer',
        GroupDescription='Enables access to application servers',
        VpcId=Ref(self.vpc_id),
        # Ingress: SSH/HTTP from inside the VPC, plus HTTP from the LB SG
        # (the single-element `for sg in [...]` keeps the rule-building
        # style uniform with the other comprehensions).
        SecurityGroupIngress=[
            ec2.SecurityGroupRule(
                IpProtocol='tcp', CidrIp=VPC_CIDR,
                FromPort=p, ToPort=p
            )
            for p in [SSH, HTTP]
        ] + [
            ec2.SecurityGroupRule(
                IpProtocol='tcp', SourceSecurityGroupId=Ref(sg),
                FromPort=HTTP, ToPort=HTTP
            )
            for sg in [app_server_lb_security_group]
        ],
        # Egress: backend service ports within the VPC (TCP, plus statsite
        # over UDP) and HTTP/HTTPS out to the internet.
        SecurityGroupEgress=[
            ec2.SecurityGroupRule(
                IpProtocol='tcp', CidrIp=VPC_CIDR,
                FromPort=p, ToPort=p
            )
            for p in [GRAPHITE, POSTGRESQL, REDIS, STATSITE, RELP]
        ] + [
            ec2.SecurityGroupRule(
                IpProtocol='udp', CidrIp=VPC_CIDR,
                FromPort=p, ToPort=p
            )
            for p in [STATSITE]
        ] + [
            ec2.SecurityGroupRule(
                IpProtocol='tcp', CidrIp=ALLOW_ALL_CIDR,
                FromPort=p, ToPort=p
            )
            for p in [HTTP, HTTPS]
        ],
        Tags=self.get_tags(Name=app_server_security_group_name)
    ))

    return app_server_lb_security_group, app_server_security_group
t.add_parameter( Parameter("PublicSubnet", Description="PublicSubnet", Type="List<AWS::EC2::Subnet::Id>", ConstraintDescription="PublicSubnet")) t.add_resource( ec2.SecurityGroup("SecurityGroup", GroupDescription="Allow SSH and private network access", SecurityGroupIngress=[ ec2.SecurityGroupRule( IpProtocol="tcp", FromPort=0, ToPort=65535, CidrIp="172.16.0.0/12", ), ec2.SecurityGroupRule( IpProtocol="tcp", FromPort="22", ToPort="22", CidrIp=PublicCidrIp, ), ], VpcId=Ref("VpcId"))) t.add_resource(Cluster('ECSCluster', )) t.add_resource( Role( 'EcsClusterRole', ManagedPolicyArns=[
def __init__(self, parameters):
    """Assemble the VPN stack pieces: security group, EC2 instance, EIP,
    and private/public Route53 A records.

    Args:
        parameters: object exposing the stack's Parameter definitions
            (instance image/type/keypair/OS storage and VPC selection).
    """
    super(Vpn, self).__init__()

    # Single administrative source address allowed through the VPN SG.
    admin_cidr = "180.181.214.196/32"
    # 22/943/443 over TCP and 1194 over UDP (typical OpenVPN Access
    # Server ports — confirm against the VPN software in use).
    ingress_rules = [
        ec2.SecurityGroupRule(IpProtocol=proto,
                              FromPort=port,
                              ToPort=port,
                              CidrIp=admin_cidr)
        for port, proto in ((22, "tcp"), (943, "tcp"),
                            (443, "tcp"), (1194, "udp"))
    ]

    self.VpnSG = ec2.SecurityGroup(
        "VpnSG",
        SecurityGroupIngress=ingress_rules,
        VpcId=ImportValue("master-vpc"),
        GroupDescription="SG for VPN external connectivity",
        Tags=Tags(
            Name=Join("-", [Ref(AWS_STACK_NAME), "vpn-sg"]),
        ),
    )

    # "<stack>.<internal-zone>" — reused as the instance Name tag and as
    # the DNS name of both A records below.
    fqdn = Join(".", [
        Ref(AWS_STACK_NAME),
        ImportValue("master-zone-internal-name"),
    ])

    self.VpnInstance = ec2.Instance(
        "VpnInstance",
        Tags=Tags(Name=fqdn, Ansible="true"),
        SecurityGroupIds=[Ref(self.VpnSG)],
        SubnetId=ImportValue("master-subnet-shared-public-a"),
        ImageId=Ref(parameters.InstanceImage.title),
        IamInstanceProfile=ImportValue("iam-role-ec2-baseline"),
        # Disabled because the instance forwards VPN traffic.
        SourceDestCheck=False,
        BlockDeviceMappings=[
            ec2.BlockDeviceMapping(
                DeviceName="/dev/sda1",
                Ebs=ec2.EBSBlockDevice(
                    VolumeSize=Ref(parameters.InstanceStorageOS.title),
                    VolumeType="gp2",
                ),
            ),
        ],
        KeyName=Ref(parameters.InstanceKeyPair.title),
        InstanceType=Ref(parameters.InstanceType.title),
    )

    self.VpnInstanceEIP = ec2.EIP(
        "VpnInstanceEIP",
        InstanceId=Ref(self.VpnInstance),
        Domain="vpc",
    )

    # Internal record resolves to the private IP ...
    self.VpnPrivateRecord = route53.RecordSetType(
        "VpnPrivateRecord",
        HostedZoneId=ImportValue("master-zone-internal-id"),
        Name=fqdn,
        ResourceRecords=[GetAtt(self.VpnInstance, "PrivateIp")],
        Type="A",
        TTL="3600",
    )

    # ... while the public record resolves to the Elastic IP.
    self.VpnPublicRecord = route53.RecordSetType(
        "VpnPublicRecord",
        HostedZoneId=FindInMap("VPCResourcesMap",
                               Ref(parameters.VPC.title),
                               "Route53PublicZone"),
        Name=fqdn,
        ResourceRecords=[Ref(self.VpnInstanceEIP)],
        Type="A",
        TTL="3600",
    )
t.add_mapping("RegionMap", { "us-east-1": { "AMI": "ami-7f418316" }, "us-west-1": { "AMI": "ami-951945d0" } }) frontend_ec2_sg = t.add_resource( ec2.SecurityGroup( "rzienertHttpSecurityGroup", VpcId=Ref(vpcid_param), GroupDescription="Enable HTTP traffic for frontend class servers", SecurityGroupIngress=[ ec2.SecurityGroupRule(IpProtocol="tcp", FromPort="80", ToPort="80", CidrIp="0.0.0.0/0") ])) ec2_instance = t.add_resource( ec2.Instance("rzienertEC2Instance", ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"), InstanceType="t1.micro", SubnetId=Ref(subnetid_param), UserData=Base64("80"), Tags=Tags(Name="rzienertVpcInstance"))) elb = t.add_resource( elb.LoadBalancer("rzienertLoadBalancer",
def configure(self):
    """Populate the template with Cassandra clusters for this environment.

    For every cluster/instance pair in the environment config this creates
    a fixed-IP ENI, an optional retained EBS data volume, and a 1:1 ASG +
    LaunchConfiguration whose user data attaches the ENI/volume at boot.
    """
    self.add_description('Sets up Cassandra in all Zones')
    self.get_eni_policies()
    self.get_default_security_groups()
    self.get_standard_parameters()
    self.get_standard_policies()
    _global_config = constants.ENVIRONMENTS[self.env]
    self.ami = self.add_parameter(
        Parameter('AMI',
                  Type='String',
                  Description='AMI ID for instances',
                  Default=get_latest_ami_id(
                      self.region, 'ivy-cassandra',
                      _global_config.get('ami_owner', 'self'))))
    # Cassandra ports, all restricted to the VPC CIDR.
    _cassandra_security_group = self.add_resource(
        ec2.SecurityGroup(
            '{}SecurityGroup'.format(self.name),
            VpcId=self.vpc_id,
            GroupDescription='Security Group for {} Instances'.format(
                self.name),
            SecurityGroupIngress=[
                {
                    'IpProtocol': 'tcp',
                    'FromPort': 7000,
                    'ToPort': 7001,
                    'CidrIp': self.vpc_cidr
                },  # inter-node
                {
                    'IpProtocol': 'tcp',
                    'FromPort': 7199,
                    'ToPort': 7199,
                    'CidrIp': self.vpc_cidr
                },  # jmx
                {
                    'IpProtocol': 'tcp',
                    'FromPort': 9042,
                    'ToPort': 9042,
                    'CidrIp': self.vpc_cidr
                },  # client port
                {
                    'IpProtocol': 'tcp',
                    'FromPort': 9160,
                    'ToPort': 9160,
                    'CidrIp': self.vpc_cidr
                },  # client (thrift)
            ]))
    self.add_resource(
        ec2.SecurityGroupIngress(
            '{}IngressSecurityGroup'.format(self.name),
            GroupId=Ref(_cassandra_security_group),
            IpProtocol='-1',
            FromPort=-1,
            ToPort=-1,
            SourceSecurityGroupId=Ref(_cassandra_security_group
                                      )  # this allows members all traffic
        ))
    self.add_security_group(Ref(_cassandra_security_group))
    # Add support for creating EBS snapshots and tagging them
    self.add_iam_policy(
        iam.Policy(PolicyName='CassandraBackups',
                   PolicyDocument={
                       'Statement': [{
                           'Effect': 'Allow',
                           'Resource': '*',
                           'Action': [
                               'ec2:AttachVolume', 'ec2:CreateSnapshot',
                               'ec2:CreateTags', 'ec2:DeleteSnapshot',
                               'ec2:DescribeInstances',
                               'ec2:DescribeSnapshots', 'ec2:DescribeTags',
                               'ec2:DescribeVolumeAttribute',
                               'ec2:DescribeVolumeStatus',
                               'ec2:DescribeVolumes', 'ec2:DetachVolume'
                           ]
                       }]
                   }))
    for cluster in constants.ENVIRONMENTS[
            self.env]['cassandra']['clusters']:
        for _instance in cluster['instances']:
            # Find the private subnet whose CIDR contains this node's
            # fixed IP (assumes exactly one match — IndexError otherwise).
            subnet = [
                s for s in self.get_subnets('private')
                if netaddr.IPAddress(_instance['ip']) in netaddr.IPNetwork(
                    s['CidrBlock'])
            ][0]
            service = 'cassandra-{}'.format(cluster['name'])
            role = '-'.join([
                self.name, cluster['name'], subnet['AvailabilityZone'],
                _instance['ip']
            ])
            tags = self.get_tags(service_override=service,
                                 role_override=role)
            # Create ENI for this server, and hold onto a Ref for it so we
            # can feed it into the userdata.  The md5-of-role suffix keeps
            # per-instance resource names unique and stable across runs.
            uniq_id = hashlib.md5(role.encode('utf-8')).hexdigest()[:10]
            eni = ec2.NetworkInterface(
                self.name + cluster['name'] + "ENI" + uniq_id,
                Description=
                'Cassandra: Cluster: {} ENV: {} PrivateSubnet {}'.format(
                    cluster['name'], self.env, subnet['SubnetId']),
                GroupSet=self.security_groups,
                PrivateIpAddress=_instance['ip'],
                SourceDestCheck=True,
                SubnetId=subnet['SubnetId'],
                Tags=tags,
            )
            self.add_resource(eni)
            # Add the rootfs
            # NOTE(review): `+=` with a set literal extends the mapping
            # returned by get_block_device_mapping; this requires that
            # object to support in-place extend from an iterable and
            # BlockDeviceMapping to be hashable — confirm.
            _block_device_mapping = get_block_device_mapping(
                self.parameters['InstanceType'].resource['Default'])
            _block_device_mapping += {
                ec2.BlockDeviceMapping(DeviceName="/dev/xvda",
                                       Ebs=ec2.EBSBlockDevice(
                                           DeleteOnTermination=True,
                                           VolumeSize=cluster.get(
                                               'rootfs_size', 20),
                                           VolumeType="gp2",
                                       ))
            }
            # Seed the cluster from one node in the remote DC, plus three
            # nodes in this DC.
            # We want to avoid making too many nodes into seeds.
            if cluster.get('remote_seed'):
                remote_env_name = cluster['remote_seed']['datacenter']
                remote_cluster_name = cluster['remote_seed']['cluster']
                remote_clusters = constants.ENVIRONMENTS[remote_env_name][
                    'cassandra']['clusters']
                # filter to just the remote cluster in the remote DC and
                # return that one only
                remote_cluster = list(
                    filter(lambda x: x['name'] == remote_cluster_name,
                           remote_clusters))[0]
                remote_seeds = [
                    i['ip'] for i in remote_cluster['instances']
                ][:1]
                local_seeds = [i['ip'] for i in cluster['instances']][:3]
                seeds = ','.join(remote_seeds + local_seeds)
            else:
                # Use the first three cassandra nodes as seeds
                seeds = ','.join([i['ip'] for i in cluster['instances']][:3])
            if cluster.get('data_volume_size'):
                # Create the EBS volume; retained on stack delete so data
                # survives cluster rebuilds.
                data_volume = ec2.Volume(
                    '{}{}DataVolume{}'.format(
                        self.name, cluster['name'], uniq_id
                    ),  # something like 'envnameCassandraappDataVolumec47145e176'
                    Size=cluster.get('data_volume_size', 20),
                    VolumeType='gp2',
                    AvailabilityZone=subnet['AvailabilityZone'],
                    DeletionPolicy='Retain',
                    Tags=tags + [ec2.Tag('Name', role + "-datavol")])
                self.add_resource(data_volume)
            else:
                data_volume = None
            # Create the user data in two phases
            # Phase 1: substitute from constants in Rain
            user_data_template = self.get_cloudinit_template(
                cluster['cassandra_template'],
                replacements=(('__PROMPT_COLOR__', self.prompt_color()),
                              ('__CASSANDRA_CLUSTER__', cluster['name']),
                              ('__CASSANDRA_CLUSTER_OVERRIDE__',
                               cluster.get('cluster_name_override', "")),
                              ('__CASSANDRA_SEEDS__', seeds),
                              ('__SERVICE__', service)))
            # Phase 2: Allow AWS Cloudformation to further substitute
            # Ref()'s in the userdata
            userdata = Base64(
                Sub(
                    user_data_template.replace(
                        '${', '${!'
                    )  # Replace bash brackets with CFN escaped style
                    .replace(
                        '{#', '${'
                    ),  # Replace rain-style CFN escapes with proper CFN brackets
                    {
                        'CFN_ENI_ID':
                        Ref(eni),
                        'CFN_DATA_EBS_VOLUME_ID':
                        Ref(data_volume) if data_volume else ""
                    }))
            # Create the Launch Configuration / ASG
            _instance_type = cluster.get('instance_type',
                                         Ref(self.instance_type))
            launch_configuration = self.add_resource(
                autoscaling.LaunchConfiguration(
                    '{}{}LaunchConfiguration{}'.format(
                        self.name, cluster['name'], uniq_id),
                    AssociatePublicIpAddress=False,
                    BlockDeviceMappings=_block_device_mapping,
                    EbsOptimized=True
                    if _instance_type in EBS_OPTIMIZED_INSTANCES else False,
                    ImageId=Ref(self.ami),
                    InstanceType=_instance_type,
                    InstanceMonitoring=False,
                    IamInstanceProfile=Ref(self.instance_profile),
                    KeyName=Ref(self.keypair_name),
                    SecurityGroups=self.security_groups,
                    UserData=userdata))
            # One ASG per node (min=max=1) pinned to the ENI's AZ/subnet,
            # so a failed instance is replaced in place.
            self.add_resource(
                autoscaling.AutoScalingGroup(
                    '{}{}ASGroup{}'.format(self.name, cluster['name'],
                                           uniq_id),
                    AvailabilityZones=[subnet['AvailabilityZone']],
                    HealthCheckType='EC2',
                    LaunchConfigurationName=Ref(launch_configuration),
                    MinSize=1,
                    MaxSize=1,
                    VPCZoneIdentifier=[subnet['SubnetId']],
                    Tags=self.get_autoscaling_tags(
                        service_override=service, role_override=role) +
                    [autoscaling.Tag('Name', role, True)]))
def main():
    """Build and print a two-instance web stack behind a classic ELB.

    Emits the rendered CloudFormation template as JSON on stdout.
    """
    template = Template()
    # AddAMI is defined elsewhere in this file; presumably it installs the
    # 'RegionMap' mapping consumed by FindInMap below — confirm.
    AddAMI(template)

    # Add the Parameters
    keyname_param = template.add_parameter(
        Parameter(
            "KeyName",
            Type="String",
            Default="mark",
            Description="Name of an existing EC2 KeyPair to "
            "enable SSH access to the instance",
        ))
    template.add_parameter(
        Parameter(
            "InstanceType",
            Type="String",
            Description="WebServer EC2 instance type",
            Default="m1.small",
            AllowedValues=[
                "t1.micro", "m1.small", "m1.medium", "m1.large", "m1.xlarge",
                "m2.xlarge", "m2.2xlarge", "m2.4xlarge", "c1.medium",
                "c1.xlarge", "cc1.4xlarge", "cc2.8xlarge", "cg1.4xlarge"
            ],
            ConstraintDescription="must be a valid EC2 instance type.",
        ))
    webport_param = template.add_parameter(
        Parameter(
            "WebServerPort",
            Type="String",
            Default="8888",
            Description="TCP/IP port of the web server",
        ))

    # Define the instance security group
    instance_sg = template.add_resource(
        ec2.SecurityGroup(
            "InstanceSecurityGroup",
            GroupDescription="Enable SSH and HTTP access on the inbound port",
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="22",
                    ToPort="22",
                    CidrIp="0.0.0.0/0",
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=Ref(webport_param),
                    ToPort=Ref(webport_param),
                    CidrIp="0.0.0.0/0",
                ),
            ]))

    # Add the web server instances
    web_instances = []
    for name in ("Ec2Instance1", "Ec2Instance2"):
        instance = template.add_resource(
            ec2.Instance(
                name,
                SecurityGroups=[Ref(instance_sg)],
                KeyName=Ref(keyname_param),
                InstanceType=Ref("InstanceType"),
                ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
                # User data carries only the web port; presumably consumed
                # by the AMI's startup script — confirm.
                UserData=Base64(Ref(webport_param)),
            ))
        web_instances.append(instance)

    # Classic ELB: listens on 80, forwards to the parameterized web port,
    # health-checks HTTP on that same port, logs access to S3.
    elasticLB = template.add_resource(
        elb.LoadBalancer(
            'ElasticLoadBalancer',
            AccessLoggingPolicy=elb.AccessLoggingPolicy(
                EmitInterval=5,
                Enabled=True,
                S3BucketName="logging",
                S3BucketPrefix="myELB",
            ),
            AvailabilityZones=GetAZs(""),
            ConnectionDrainingPolicy=elb.ConnectionDrainingPolicy(
                Enabled=True,
                Timeout=300,
            ),
            CrossZone=True,
            Instances=[Ref(r) for r in web_instances],
            Listeners=[
                elb.Listener(
                    LoadBalancerPort="80",
                    InstancePort=Ref(webport_param),
                    Protocol="HTTP",
                ),
            ],
            HealthCheck=elb.HealthCheck(
                Target=Join("", ["HTTP:", Ref(webport_param), "/"]),
                HealthyThreshold="3",
                UnhealthyThreshold="5",
                Interval="30",
                Timeout="5",
            )))

    template.add_output(
        Output("URL",
               Description="URL of the sample website",
               Value=Join("", ["http://", GetAtt(elasticLB, "DNSName")])))

    print(template.to_json())
def main():
    """Generate and print (as YAML) the "Dev Stack" CloudFormation template:
    one t2.micro EC2 instance plus a MySQL RDS instance, with security
    groups wiring HTTP/HTTPS/SSH to the instance and MySQL to the DB.
    """
    template = Template()
    template.add_description("Dev Stack")

    # --- Parameters --------------------------------------------------
    keyname_param = template.add_parameter(
        Parameter(
            'KeyName',
            Description='An existing EC2 KeyPair.',
            ConstraintDescription='An existing EC2 KeyPair.',
            Type='AWS::EC2::KeyPair::KeyName',
        )
    )
    # NoEcho keeps the password out of console/API output.
    db_pass_param = template.add_parameter(
        Parameter(
            'DBPass',
            NoEcho=True,
            Type='String',
            Description='The database admin account password',
            ConstraintDescription='Must contain only alphanumeric characters',
            AllowedPattern="[-_a-zA-Z0-9]*",
        )
    )
    db_name_param = template.add_parameter(
        Parameter(
            'DBName',
            Default='miramax',
            Type='String',
            Description='The database name',
            ConstraintDescription='Must begin with a letter and contain only alphanumeric characters',
            AllowedPattern="[-_a-zA-Z0-9]*",
        )
    )
    db_user_param = template.add_parameter(
        Parameter(
            'DBUser',
            Default='miramax',
            Type='String',
            Description='Username for MySQL database access',
            ConstraintDescription='Must begin with a letter and contain only alphanumeric characters',
            AllowedPattern="[-_a-zA-Z0-9]*",
        )
    )

    # Region -> AMI map built by a helper defined elsewhere in this file.
    template.add_mapping('RegionMap', create_ami_region_map())

    # --- Security groups ---------------------------------------------
    # Instance group: SSH/HTTP/HTTPS open to the world.
    ec2_security_group = template.add_resource(
        ec2.SecurityGroup(
            'EC2SecurityGroup',
            Tags=[{'Key': 'Name', 'Value': Ref('AWS::StackName')}, ],
            GroupDescription='EC2 Security Group',
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol='tcp',
                    FromPort='22',
                    ToPort='22',
                    CidrIp='0.0.0.0/0',
                    Description='SSH'),
                ec2.SecurityGroupRule(
                    IpProtocol='tcp',
                    FromPort='80',
                    ToPort='80',
                    CidrIp='0.0.0.0/0',
                    Description='HTTP'),
                ec2.SecurityGroupRule(
                    IpProtocol='tcp',
                    FromPort='443',
                    ToPort='443',
                    CidrIp='0.0.0.0/0',
                    Description='HTTPS'),
            ],
        )
    )
    # DB group: MySQL reachable only from members of the instance group.
    db_security_group = template.add_resource(
        ec2.SecurityGroup(
            'DBSecurityGroup',
            Tags=[{'Key': 'Name', 'Value': Ref('AWS::StackName')}, ],
            GroupDescription='DB Security Group',
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol='tcp',
                    FromPort='3306',
                    ToPort='3306',
                    SourceSecurityGroupId=GetAtt(ec2_security_group, "GroupId"),
                    Description='MySQL'),
            ]
        )
    )

    # --- EC2 instance -------------------------------------------------
    ec2_instance = template.add_resource(
        ec2.Instance(
            'Instance',
            # cfn-init metadata: drops a marker file on the instance.
            Metadata=Metadata(
                Init({
                    "config": InitConfig(
                        files=InitFiles({
                            "/tmp/instance.txt": InitFile(
                                content=Ref('AWS::StackName'),
                                mode="000644",
                                owner="root",
                                group="root"
                            )
                        }),
                    )
                }),
            ),
            # Stack creation waits up to 15 minutes for cfn-signal below.
            CreationPolicy=CreationPolicy(
                ResourceSignal=ResourceSignal(Timeout='PT15M')
            ),
            Tags=[{'Key': 'Name', 'Value': Ref('AWS::StackName')}, ],
            ImageId=FindInMap('RegionMap', Ref('AWS::Region'), 'ami'),
            InstanceType='t2.micro',
            KeyName=Ref(keyname_param),
            SecurityGroups=[Ref(ec2_security_group), Ref(db_security_group)],
            # Instance boots only after the database exists.
            DependsOn='Database',
            UserData=Base64(
                Join(
                    '',
                    [
                        '#!/bin/bash -x\n',
                        # NOTE(review): the next two literals have no comma
                        # between them, so Python concatenates them into one
                        # list element. Harmless here because the Join
                        # separator is '' — the rendered script is identical.
                        'exec > /tmp/user-data.log 2>&1\n'
                        'unset UCF_FORCE_CONFFOLD\n',
                        'export UCF_FORCE_CONFFNEW=YES\n',
                        'ucf --purge /boot/grub/menu.lst\n',
                        'export DEBIAN_FRONTEND=noninteractive\n',
                        'apt-get update\n',
                        'apt-get -o Dpkg::Options::="--force-confnew" --force-yes -fuy upgrade\n',
                        'apt-get install -y python-pip\n',
                        'pip install https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz\n',
                        'apt-get install -y apache2\n',
                        '# Signal Cloudformation when set up is complete\n',
                        '/usr/local/bin/cfn-signal -e $? --resource=Instance --region=',
                        Ref('AWS::Region'),
                        ' --stack=',
                        Ref('AWS::StackName'),
                        '\n',
                    ]
                )
            )
        )
    )

    # --- RDS instance -------------------------------------------------
    db_instance = template.add_resource(
        DBInstance(
            'Database',
            DBName=Ref(db_name_param),
            AllocatedStorage=20,
            DBInstanceClass='db.t2.micro',
            Engine='MySQL',
            EngineVersion='5.7.21',
            MasterUsername=Ref(db_user_param),
            MasterUserPassword=Ref(db_pass_param),
            VPCSecurityGroups=[GetAtt(db_security_group, "GroupId")],
        )
    )

    # --- Outputs ------------------------------------------------------
    template.add_output([
        Output(
            'InstanceDnsName',
            Description='PublicDnsName',
            Value=GetAtt(ec2_instance, 'PublicDnsName'),
        ),
        Output(
            'DatabaseDnsName',
            Description='DBEndpoint',
            Value=GetAtt(db_instance, 'Endpoint.Address'),
        ),
    ])

    print(template.to_yaml())
def __init__(self, tags=dict()): super(RDSFactory, self).__init__() self.tags = tags # Largely copied from # https://github.com/cloudtools/troposphere/blob/master/examples/RDS_VPC.py # Each parameter is followed by the resources which depend on it. # VPC and security groups vpcid = Parameter('VpcId', Type='String', Description='Id of existing VPC') private_hosted_zone_id = Parameter( 'PrivateHostedZoneId', Type='String', Description='Private hosted zone id') db_security_group = ec2.SecurityGroup( 'sgDatabase', GroupDescription='Security group for RDS DB Instance.', VpcId=Ref(vpcid), Tags=Tags(Name='Database', **self.tags)) # Subnets subnets = Parameter( 'AppServerSubnets', Type='CommaDelimitedList', Description='List of SubnetIds spanning at least two AZs in VPC') subnet_group = rds.DBSubnetGroup( 'CacDbSubnetGroup', DBSubnetGroupDescription='Subnets available for Cac RDS instance', SubnetIds=Ref(subnets), Tags=Tags(Name='RDSSubnetGroup', **self.tags)) # Database db_name = Parameter( 'DbName', Description='Name of the database to be created', Type='String', MinLength='5', MaxLength='63', AllowedPattern='[a-zA-Z_][a-zA-Z0-9_]*', ConstraintDescription= 'Name must begin with a letter and contain only alphanumerics') db_user = Parameter( 'DbUser', NoEcho=True, Description='Database admin user account', Type='String', MinLength='5', MaxLength='16', AllowedPattern='[a-zA-Z][a-zA-Z0-9]*', ConstraintDescription= 'Name must begin with a letter and contain only alphanumerics') db_password = Parameter( 'DbPassword', NoEcho=True, Description='Database admin account password', Type='String', MinLength='8', ) db_instance_class = Parameter('DbInstanceClass', Default='db.m3.medium', Description='Database instance class', Type='String', AllowedValues=RDS_INSTANCE_TYPES) db_storage = Parameter( 'DbStorage', Description='Available database storage (GB)', Default='100', Type='Number', MaxValue='1024', ConstraintDescription='Storage space must be less than 1024GB', ) db_dns_name = 
Parameter('DbDNSName', Type='String', Description='Private DNS name for database') database = rds.DBInstance('CacDb', DBName=Ref(db_name), AllocatedStorage=Ref(db_storage), DBInstanceClass=Ref(db_instance_class), Engine='postgres', EngineVersion='9.4', MasterUsername=Ref(db_user), MasterUserPassword=Ref(db_password), DBSubnetGroupName=Ref(subnet_group), VPCSecurityGroups=[Ref(db_security_group)], MultiAZ=True, Tags=Tags(Name='CacDB', **self.tags)) db_dns_record = route53.RecordSetType( 'rsDatabase', Name=Ref(db_dns_name), ResourceRecords=[GetAtt('CacDb', 'Endpoint.Address')], TTL=600, Type='CNAME', HostedZoneId=Ref(private_hosted_zone_id), ) # Outputs rds_endpoint = Output( 'CacDbEndpoint', Description='Endpoint to which Postgres clients should connect', Value=GetAtt('CacDb', 'Endpoint.Address')) database_name = Output( 'CacDbName', Description='Name of database created on Cac RDS instance', Value=Ref(db_name)) db_sg = Output('DatabaseSecurityGroup', Description='Security Group of Database', Value=GetAtt('sgDatabase', 'GroupId')) self.parameters = [ vpcid, private_hosted_zone_id, subnets, db_name, db_user, db_password, db_instance_class, db_storage, db_dns_name ] self.resources = [ db_security_group, subnet_group, database, db_dns_record ] self.outputs = [rds_endpoint, database_name, db_sg]
from troposphere import Ref, Template, Parameter, Output, Join, GetAtt, Base64 import troposphere.ec2 as ec2 t = Template() sg = ec2.SecurityGroup("MernSg") sg.GroupDescription = "Allow access to ports 80, 443 and 22 to the web server" sg.SecurityGroupIngress = [ ec2.SecurityGroupRule(IpProtocol="tcp", FromPort="22", ToPort="22", CidrIp="0.0.0.0/0"), ec2.SecurityGroupRule(IpProtocol="tcp", FromPort="80", ToPort="80", CidrIp="0.0.0.0/0"), ec2.SecurityGroupRule(IpProtocol="tcp", FromPort="443", ToPort="443", CidrIp="0.0.0.0/0"), ] t.add_resource(sg) keypair = t.add_parameter( Parameter( "KeyName", Description= "Name of the SSH key pair that will be used to access the instance", Type="String")) instance = ec2.Instance("Webserver")
t.add_parameter( Parameter( "KeyPair", Description="herculano_devops", Type="AWS::EC2::KeyPair::KeyName", ConstraintDescription="herculano_devops", )) t.add_resource( ec2.SecurityGroup( "SecurityGroup", GroupDescription="Allow SSH and TCP/ {} access".format(port), SecurityGroupIngress=[ ec2.SecurityGroupRule(IpProtocol="tcp", FromPort="22", ToPort="22", CidrIp=PublicCidr), ec2.SecurityGroupRule(IpProtocol="tcp", FromPort=port, ToPort=port, CidrIp="0.0.0.0/0"), ], )) ud = Base64( Join('\n', [ "#!/bin/bash", "yum install --enablerepo=epel -y git", "pip install ansible", AnsiblePullCmd, "echo '*/10 * * * * {}' > /etc/cron.d/ansible-pull".format( AnsiblePullCmd) ]))
def west_instance_stack(cfn_file): template = Template() key_name_param = template.add_parameter( Parameter( "keyName", Description="string of vpc cidr block to use", Type="String", )) ami_id_param = template.add_parameter( Parameter("amiId", Description="string of vpc cidr block to use", Type="AWS::EC2::Image::Id")) instance_type_param = template.add_parameter( Parameter( "instanceType", Description="string of vpc cidr block to use", Type="String", )) sg = template.add_resource( ec2.SecurityGroup( "MySg", GroupDescription="who cares", VpcId=ImportValue("VpcId-jdix"), Tags=resource_tags, )) sshIn = template.add_resource( ec2.SecurityGroupIngress("MySshIn", CidrIp="0.0.0.0/0", IpProtocol="tcp", FromPort=22, ToPort=22, GroupId=Ref(sg))) pingIn = template.add_resource( ec2.SecurityGroupIngress("MyPingIn", CidrIp="0.0.0.0/0", IpProtocol="icmp", FromPort=-1, ToPort=-1, GroupId=Ref(sg))) instance = template.add_resource( ec2.Instance( "MyInstance", ImageId=Ref(ami_id_param), SubnetId=ImportValue("SubnetId-jdix"), InstanceType=Ref(instance_type_param), KeyName=Ref(key_name_param), Tags=resource_tags, SecurityGroupIds=[Ref(sg)], )) template.add_output([ Output( "InstanceId", Description="InstanceId of the newly created EC2 instance", Value=Ref(instance), Export=Export("InstanceId-jdix"), ), Output( "InstancePrivateIP", Description="InstanceId of the newly created EC2 instance", Value=GetAtt(instance, "PrivateIp"), Export=Export("InstancePrivateIP-jdix"), ) ]) template_out_yaml(cfn_file, template)
t.add_parameter( Parameter("PublicSubnet", Description="PublicSubnet", Type="List<AWS::EC2::Subnet::Id>", ConstraintDescription="PublicSubnet")) t.add_resource( ec2.SecurityGroup( "SecurityGroup", GroupDescription="Allow SSH and TCP/{} access".format(ApplicationPort), SecurityGroupIngress=[ ec2.SecurityGroupRule( IpProtocol="tcp", FromPort="22", ToPort="22", CidrIp=PublicCidrIp, ), ec2.SecurityGroupRule( IpProtocol="tcp", FromPort=ApplicationPort, ToPort=ApplicationPort, CidrIp="0.0.0.0/0", ), ], VpcId=Ref("VpcId"))) t.add_resource( ec2.SecurityGroup( "LoadBalancerSecurityGroup", GroupDescription="Web load balancer security group.", VpcId=Ref("VpcId"), SecurityGroupIngress=[
}, ), ) ], NetworkMode='awsvpc', )) keycloakServiceSG = t.add_resource( ec2.SecurityGroup( 'KeycloakSecurityGroup', GroupDescription='Keycloak Service SG', VpcId=ImportValue(Sub('${CoreStack}-VPC-ID')), SecurityGroupIngress=[{ 'IpProtocol': 'tcp', 'FromPort': 8080, 'ToPort': 8080, 'SourceSecurityGroupId': ImportValue(Sub('${CoreStack}-LoadBalancer-SG-ID')), }], )) keycloakService = t.add_resource( ecs.Service('KeycloakService', Cluster=ImportValue(Sub('${CoreStack}-ECS-Cluster')), TaskDefinition=keycloakTask.Ref(), DesiredCount=1, LoadBalancers=[ ecs.LoadBalancer(
def init_cloud(args):
    """Build the CloudFormation template for the Kala stack.

    Creates an SQS queue, an S3 bucket, security groups for the app and
    database, an encrypted Postgres RDS instance, and an IAM role plus
    instance profile granting the app access to the bucket and queue.

    :param args: namespace with sqs_name, s3_name, kala_security_group,
        database_security_group, rds_* settings, production, and iam_role.
    :returns: the populated troposphere ``Template``.
    """
    template = Template()

    queue = template.add_resource(
        Queue(
            "{0}".format(args.sqs_name),
            QueueName="{0}".format(args.sqs_name),
        ))
    bucket = template.add_resource(
        Bucket("{0}".format(args.s3_name),
               BucketName="{0}".format(args.s3_name)))

    # App security group: HTTP/HTTPS from anywhere.
    kala_security_group = template.add_resource(
        ec2.SecurityGroup(
            "{0}".format(args.kala_security_group),
            GroupName="{0}".format(args.kala_security_group),
            GroupDescription="Enable HTTP and HTTPS access on the inbound port",
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="80",
                    ToPort="80",
                    CidrIp="0.0.0.0/0",
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="443",
                    ToPort="443",
                    CidrIp="0.0.0.0/0",
                ),
            ]))
    # Postgres reachable only from members of the app security group.
    database_security_group = template.add_resource(
        ec2.SecurityGroup(
            "{0}".format(args.database_security_group),
            GroupName="{0}".format(args.database_security_group),
            GroupDescription="Enable Database access for the security groups",
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="5432",
                    ToPort="5432",
                    SourceSecurityGroupName=Ref(kala_security_group),
                ),
            ]))

    database = template.add_resource(
        rds.DBInstance(
            "{0}".format(args.rds_instance_name),
            DBInstanceIdentifier="{0}".format(args.rds_instance_name),
            DBName=args.rds_name,
            # FIX: username/password were formatted into the literal
            # "******" (a redaction artifact), which ignores the format
            # argument and would set the credentials to six asterisks.
            MasterUsername="{0}".format(args.rds_username),
            MasterUserPassword="{0}".format(args.rds_password),
            AllocatedStorage=args.rds_allocated_storage,
            DBInstanceClass=args.rds_instance_class,
            Engine="postgres",
            MultiAZ=args.production,
            StorageEncrypted=True,
            VPCSecurityGroups=[GetAtt(database_security_group, "GroupId")]))

    # Instance policies: full access to this stack's bucket and queue only.
    s3_policy = PolicyDocument(
        Version="2012-10-17",
        Id="{0}Policy".format(args.s3_name),
        Statement=[
            Statement(Effect="Allow",
                      Action=[S3Action("*")],
                      Resource=[Join("", [GetAtt(bucket, "Arn"), "/*"])]),
        ])
    # FIX: the policy Id previously reused args.s3_name (copy-paste);
    # it now names the SQS queue it governs.
    sqs_policy = PolicyDocument(Version="2012-10-17",
                                Id="{0}Policy".format(args.sqs_name),
                                Statement=[
                                    Statement(Effect="Allow",
                                              Action=[SQSAction("*")],
                                              Resource=[GetAtt(queue, "Arn")])
                                ])

    role = Role('{0}Role'.format(args.iam_role),
                RoleName='{0}Role'.format(args.iam_role),
                AssumeRolePolicyDocument={
                    "Version": "2012-10-17",
                    "Statement": [{
                        "Action": "sts:AssumeRole",
                        "Effect": "Allow",
                        "Principal": {
                            "Service": "ec2.amazonaws.com"
                        }
                    }]
                },
                Policies=[
                    Policy(PolicyName="KalaS3Policy", PolicyDocument=s3_policy),
                    Policy(PolicyName="KalaSQSPolicy", PolicyDocument=sqs_policy)
                ])
    template.add_resource(role)
    template.add_resource(
        InstanceProfile("{0}InstanceProfile".format(args.iam_role),
                        Roles=[Ref(role)],
                        InstanceProfileName="{0}InstanceProfile".format(
                            args.iam_role)))
    return template
}) StorReduceWebSecurityGroup = t.add_resource( ec2.SecurityGroup( "StorReduceWebSecurityGroup", GroupDescription= "Enables remote access to port 80 and 443 for the StorReduce load balancer", SecurityGroupIngress=[ ec2.SecurityGroupRule( IpProtocol="tcp", FromPort="80", ToPort="80", CidrIp=Ref(RemoteAccessCIDRParam), ), ec2.SecurityGroupRule( IpProtocol="tcp", FromPort="443", ToPort="443", CidrIp=Ref(RemoteAccessCIDRParam), ), ec2.SecurityGroupRule( IpProtocol="tcp", FromPort="8080", ToPort="8080", CidrIp=Ref(RemoteAccessCIDRParam), ), ], VpcId=Ref(VpcIdParam))) MonitorSecurityGroup = t.add_resource( ec2.SecurityGroup(
key_name = t.add_parameter( Parameter( 'KeyName', Type='AWS::EC2::KeyPair::KeyName', Description='Name of an existing EC2 KeyPair to enable SSH access')) ami_id = t.add_parameter( Parameter('AmiId', Type='String', Default='ami-98aa1cf0')) security_group = t.add_resource( ec2.SecurityGroup('SecurityGroup', GroupDescription='Allows SSH access from anywhere', SecurityGroupIngress=[ ec2.SecurityGroupRule(IpProtocol='tcp', FromPort=22, ToPort=22, CidrIp='0.0.0.0/0') ], Tags=Tags(Name='ops.cfninit-sg'))) ec2_instance = t.add_resource( ec2.Instance( 'Ec2Instance', ImageId=Ref(ami_id), InstanceType='t1.micro', KeyName=Ref(key_name), SecurityGroups=[Ref(security_group)], IamInstanceProfile='PullCredentials', UserData=Base64( Join('', [
def create_security_groups(self): worker_lb_security_group_name = 'sgWorkerLoadBalancer' worker_lb_security_group = self.add_resource( ec2.SecurityGroup( worker_lb_security_group_name, GroupDescription= 'Enables access to workers via a load balancer', VpcId=Ref(self.vpc_id), SecurityGroupIngress=[ ec2.SecurityGroupRule(IpProtocol='tcp', CidrIp=Ref(self.ip_access), FromPort=p, ToPort=p) for p in [HTTP] ], SecurityGroupEgress=[ ec2.SecurityGroupRule(IpProtocol='tcp', CidrIp=VPC_CIDR, FromPort=p, ToPort=p) for p in [HTTP] ], Tags=self.get_tags(Name=worker_lb_security_group_name))) worker_security_group_name = 'sgWorker' worker_security_group = self.add_resource( ec2.SecurityGroup( worker_security_group_name, GroupDescription='Enables access to workers', VpcId=Ref(self.vpc_id), SecurityGroupIngress=[ ec2.SecurityGroupRule(IpProtocol='tcp', CidrIp=VPC_CIDR, FromPort=p, ToPort=p) for p in [SSH, HTTP] ] + [ ec2.SecurityGroupRule(IpProtocol='tcp', SourceSecurityGroupId=Ref(sg), FromPort=HTTP, ToPort=HTTP) for sg in [worker_lb_security_group] ], SecurityGroupEgress=[ ec2.SecurityGroupRule(IpProtocol='tcp', CidrIp=VPC_CIDR, FromPort=p, ToPort=p) for p in [GRAPHITE, POSTGRESQL, REDIS, STATSITE, RELP] ] + [ ec2.SecurityGroupRule(IpProtocol='udp', CidrIp=VPC_CIDR, FromPort=p, ToPort=p) for p in [STATSITE] ] + [ ec2.SecurityGroupRule(IpProtocol='tcp', CidrIp=ALLOW_ALL_CIDR, FromPort=p, ToPort=p) for p in [HTTP, HTTPS] ], Tags=self.get_tags(Name=worker_security_group_name))) return worker_lb_security_group, worker_security_group
def main():
    """Build the CloudFormation template for a Fargate ECS service behind a
    network load balancer with a TLS listener and a Route53 CNAME, and
    write it to ``ecs-fargate-service-cf.yaml``.
    """
    template = Template()
    # FIX: use set_version for consistency with set_description below
    # (add_version is the deprecated alias for the same call).
    template.set_version("2010-09-09")
    template.set_description("AWS CloudFormation ECS Service")

    # --- Parameters (all supplied by the deploy pipeline) -------------
    Application = template.add_parameter(
        Parameter(
            "Application",
            Type="String",
        ))
    DockerImage = template.add_parameter(
        Parameter(
            "DockerImage",
            Type="String",
        ))
    ClusterName = template.add_parameter(
        Parameter(
            "ClusterName",
            Type="String",
        ))
    ContainerPort = template.add_parameter(
        Parameter(
            "ContainerPort",
            Type="String",
        ))
    HostPort = template.add_parameter(Parameter(
        "HostPort",
        Type="String",
    ))
    HostedZoneName = template.add_parameter(
        Parameter(
            "HostedZoneName",
            Type="String",
        ))
    CertArn = template.add_parameter(Parameter(
        "CertArn",
        Type="String",
    ))
    ExecutionRoleArn = template.add_parameter(
        Parameter("ExecutionRoleArn",
                  Type="String",
                  # FIX: typo "creadentials" -> "credentials"
                  Description="Execution Role to get credentials from ssm"))
    HealthCheckPath = template.add_parameter(
        Parameter(
            "HealthCheckPath",
            Type="String",
        ))
    HealthCheckIntervalSeconds = template.add_parameter(
        Parameter(
            "HealthCheckIntervalSeconds",
            Type="String",
        ))
    HealthyThresholdCount = template.add_parameter(
        Parameter(
            "HealthyThresholdCount",
            Type="String",
        ))
    HealthCheckTimeoutSeconds = template.add_parameter(
        Parameter(
            "HealthCheckTimeoutSeconds",
            Type="String",
        ))
    UnhealthyThresholdCount = template.add_parameter(
        Parameter(
            "UnhealthyThresholdCount",
            Type="String",
        ))
    VpcId = template.add_parameter(Parameter(
        "VpcId",
        Type="String",
    ))
    Subnets = template.add_parameter(
        Parameter(
            "Subnets",
            Type="List<AWS::EC2::Subnet::Id>",
        ))
    PrivateSubnets = template.add_parameter(
        Parameter(
            "PrivateSubnets",
            Type="List<AWS::EC2::Subnet::Id>",
        ))

    # --- Network load balancer in the public subnets -------------------
    NetworkLB = template.add_resource(
        elb.LoadBalancer("NetworkLB",
                         Name=Join("", [Ref(Application), "-nlb"]),
                         Scheme="internet-facing",
                         Subnets=Ref(Subnets),
                         Type='network'))
    # IP-mode target group: Fargate tasks register by ENI address.
    NlbTargetGroup = template.add_resource(
        elb.TargetGroup("NlbTargetGroup",
                        Name='ecs-fargate-service-targetgroup',
                        HealthCheckIntervalSeconds=Ref(
                            HealthCheckIntervalSeconds),
                        HealthCheckProtocol="TCP",
                        HealthyThresholdCount=Ref(HealthyThresholdCount),
                        Port=80,
                        Protocol="TCP",
                        TargetType="ip",
                        UnhealthyThresholdCount=Ref(UnhealthyThresholdCount),
                        VpcId=Ref(VpcId)))
    # TLS terminates at the listener; plain TCP to the targets.
    NlbListener = template.add_resource(
        elb.Listener(
            "Listener",
            DependsOn=["NlbTargetGroup", "NetworkLB"],
            Certificates=[elb.Certificate(CertificateArn=Ref(CertArn))],
            Port="443",
            Protocol="TLS",
            LoadBalancerArn=Ref(NetworkLB),
            DefaultActions=[
                elb.Action(Type="forward", TargetGroupArn=Ref(NlbTargetGroup))
            ]))

    # --- Fargate task definition ---------------------------------------
    Task_Definition = template.add_resource(
        TaskDefinition(
            'TaskDefinition',
            Memory='512',
            Cpu='256',
            RequiresCompatibilities=['FARGATE'],
            NetworkMode='awsvpc',
            ExecutionRoleArn=Ref(ExecutionRoleArn),
            ContainerDefinitions=[
                ContainerDefinition(
                    Name=Join("", [Ref(Application)]),
                    Image=Ref(DockerImage),
                    Essential=True,
                    Environment=[Environment(Name="MY_ENV_VAR", Value="true")],
                    DockerLabels={
                        'aws-account': Ref("AWS::AccountId"),
                        'region': Ref("AWS::Region"),
                        'stack': Ref("AWS::StackName")
                    },
                    PortMappings=[
                        PortMapping(ContainerPort=Ref(ContainerPort))
                    ])
            ]))

    # Task ENI security group: anything from the private 10/8 range in,
    # anything out.
    AwsVpcSg = template.add_resource(
        ec2.SecurityGroup('SecurityGroup',
                          GroupDescription='Security Group',
                          SecurityGroupIngress=[
                              ec2.SecurityGroupRule(IpProtocol='-1',
                                                    CidrIp='10.0.0.0/8')
                          ],
                          SecurityGroupEgress=[
                              ec2.SecurityGroupRule(IpProtocol="-1",
                                                    CidrIp="0.0.0.0/0")
                          ],
                          VpcId=Ref(VpcId)))

    # --- The ECS service itself ----------------------------------------
    app_service = template.add_resource(
        Service("AppService",
                DependsOn=["Listener", "TaskDefinition"],
                Cluster=Ref(ClusterName),
                LaunchType='FARGATE',
                DesiredCount=1,
                TaskDefinition=Ref(Task_Definition),
                ServiceName=Join("", [Ref(Application), "-ecs-service"]),
                LoadBalancers=[
                    ecs.LoadBalancer(ContainerName=Join(
                        "", [Ref(Application)]),
                                     ContainerPort=Ref(ContainerPort),
                                     TargetGroupArn=Ref(NlbTargetGroup))
                ],
                NetworkConfiguration=NetworkConfiguration(
                    AwsvpcConfiguration=AwsvpcConfiguration(
                        Subnets=Ref(PrivateSubnets),
                        SecurityGroups=[Ref(AwsVpcSg)]))))

    # Friendly DNS name pointing at the NLB.
    AppDNSRecord = template.add_resource(
        RecordSetType(
            "AppDNSRecord",
            DependsOn=["AppService"],
            HostedZoneName=Join("", [Ref(HostedZoneName), "."]),
            Name=Join("", [Ref(Application), ".", Ref(HostedZoneName), "."]),
            Type="CNAME",
            TTL="900",
            ResourceRecords=[GetAtt(NetworkLB, "DNSName")]))

    template.add_output(
        Output("URL",
               Description="DomainName",
               Value=Join("", ["https://", Ref(AppDNSRecord)])))

    with open("ecs-fargate-service-cf.yaml", "w") as yamlout:
        yamlout.write(template.to_yaml())
t.add_parameter(Parameter( "KeyPair", Description="Name of an existing EC2 KeyPair to SSH", Type="AWS::EC2::KeyPair::KeyName", ConstraintDescription="must be the name of an existing EC2 KeyPair.", )) t.add_resource(ec2.SecurityGroup( "SecurityGroup", GroupDescription="Allow SSH and TCP/{} access".format(ApplicationPort), SecurityGroupIngress=[ ec2.SecurityGroupRule( IpProtocol="tcp", FromPort="22", ToPort="22", CidrIp=PublicCidrIp, ), ec2.SecurityGroupRule( IpProtocol="tcp", FromPort=ApplicationPort, ToPort=ApplicationPort, CidrIp="0.0.0.0/0", ), ], )) ud = Base64(Join('\n', [ "#!/bin/bash", "sudo yum install --enablerepo=epel -y nodejs", "wget http://bit.ly/2vESNuc -O /home/ec2-user/helloworld.js", "wget http://bit.ly/2vVvT18 -O /etc/init/helloworld.conf", "start helloworld"
} }], })) instance_profile = t.add_resource( iam.InstanceProfile("ComponentInstanceProfile", Path="/", Roles=[Ref(role)])) elb_sg = t.add_resource( ec2.SecurityGroup("ELBSecurityGroup", VpcId=Ref(vpc_id), GroupDescription="Only allow public traffic on 443", SecurityGroupIngress=[ ec2.SecurityGroupRule( IpProtocol="tcp", FromPort="443", ToPort="443", CidrIp="0.0.0.0/0", ) ], SecurityGroupEgress=[])) elb = t.add_resource( elasticloadbalancing.LoadBalancer( 'ElasticLoadBalancer', Subnets=Ref(public_subnets), CrossZone=True, SecurityGroups=[Ref(elb_sg)], Listeners=[ elasticloadbalancing.Listener(LoadBalancerPort="443", InstancePort="7443",
def buildInfrastructure(t, args): if (not args.recovery): t.add_resource( kms.Key( 'OpenEMRKey', DeletionPolicy='Retain' if args.recovery else 'Delete' if args.dev else 'Retain', KeyPolicy={ "Version": "2012-10-17", "Id": "key-default-1", "Statement": [{ "Sid": "1", "Effect": "Allow", "Principal": { "AWS": [ Join(':', ['arn:aws:iam:', ref_account, 'root']) ] }, "Action": "kms:*", "Resource": "*" }] } ) ) t.add_resource( s3.Bucket( 'S3Bucket', DeletionPolicy='Retain', BucketName=Join( '-', ['openemr', Select('2', Split('/', ref_stack_id))]) ) ) t.add_resource( s3.BucketPolicy( 'BucketPolicy', Bucket=Ref('S3Bucket'), PolicyDocument={ "Version": "2012-10-17", "Statement": [ { "Sid": "AWSCloudTrailAclCheck", "Effect": "Allow", "Principal": {"Service": "cloudtrail.amazonaws.com"}, "Action": "s3:GetBucketAcl", "Resource": {"Fn::Join": ["", ["arn:aws:s3:::", {"Ref": "S3Bucket"}]]} }, { "Sid": "AWSCloudTrailWrite", "Effect": "Allow", "Principal": {"Service": "cloudtrail.amazonaws.com"}, "Action": "s3:PutObject", "Resource": {"Fn::Join": ["", ["arn:aws:s3:::", {"Ref": "S3Bucket"}, "/AWSLogs/", {"Ref": "AWS::AccountId"}, "/*"]]}, "Condition": { "StringEquals": { "s3:x-amz-acl": "bucket-owner-full-control" } } } ] } ) ) t.add_resource( cloudtrail.Trail( 'CloudTrail', DependsOn='BucketPolicy', IsLogging=True, IncludeGlobalServiceEvents=True, IsMultiRegionTrail=True, S3BucketName=Ref('S3Bucket') ) ) t.add_resource( ec2.SecurityGroup( 'ApplicationSecurityGroup', GroupDescription='Application Security Group', VpcId=Ref('VPC'), Tags=Tags(Name='Application') ) ) return t
)) DefaultSecurityGroup = t.add_resource( ec2.SecurityGroup( "DefaultSecurityGroup", GroupDescription="Default Security group for all the Nodes", SecurityGroupIngress=[ ec2.SecurityGroupRule(ToPort="-1", IpProtocol="icmp", CidrIp=FindInMap("SubnetConfig", "VPC", "CIDR"), FromPort="-1"), ec2.SecurityGroupRule(ToPort="65535", IpProtocol="tcp", CidrIp=FindInMap("SubnetConfig", "VPC", "CIDR"), FromPort="0"), ec2.SecurityGroupRule(ToPort="65535", IpProtocol="udp", CidrIp=FindInMap("SubnetConfig", "VPC", "CIDR"), FromPort="0"), ec2.SecurityGroupRule(ToPort="22", IpProtocol="tcp", CidrIp=Ref(SSHLocation), FromPort="22"), ], VpcId=Ref(VPC), )) PublicRoute = t.add_resource( ec2.Route(
def buildInstance(t, args):
    """Add the OpenEMR webserver instance and its supporting resources to
    template *t*: web/sysadmin security groups, the instance IAM role and
    profile, the encrypted Docker EBS volume, and the cfn-init bootstrap
    that writes the docker-compose file and signals the stack.

    :param t: troposphere Template being assembled.
    :param args: parsed CLI options; ``dev`` opens world-wide SSH and
        relaxes volume retention, ``recovery`` restores from a prior
        stack's backups.
    :returns: the template *t*.
    """
    # Public web access: 80/443 from anywhere.
    t.add_resource(
        ec2.SecurityGroup(
            'WebserverIngressSG',
            GroupDescription='Global Webserver Access',
            VpcId=Ref('VPC'),
            Tags=Tags(Name='Global Webserver Access')
        )
    )
    t.add_resource(
        ec2.SecurityGroupIngress(
            'WebserverIngressSG80',
            GroupId=Ref('WebserverIngressSG'),
            IpProtocol='tcp',
            CidrIp='0.0.0.0/0',
            FromPort='80',
            ToPort='80'
        )
    )
    t.add_resource(
        ec2.SecurityGroupIngress(
            'WebserverIngress443',
            GroupId=Ref('WebserverIngressSG'),
            IpProtocol='tcp',
            CidrIp='0.0.0.0/0',
            FromPort='443',
            ToPort='443'
        )
    )
    # Admin access group; SSH is only opened to the world on dev stacks.
    t.add_resource(
        ec2.SecurityGroup(
            'SysAdminAccessSG',
            GroupDescription='System Administrator Access',
            VpcId=Ref('VPC'),
            Tags=Tags(Name='System Administrator Access')
        )
    )
    if args.dev:
        t.add_resource(
            ec2.SecurityGroupIngress(
                'DevSysadminIngress22',
                GroupId=Ref('SysAdminAccessSG'),
                IpProtocol='tcp',
                CidrIp='0.0.0.0/0',
                FromPort='22',
                ToPort='22'
            )
        )

    # Instance role: backup bucket access plus use of the stack's KMS key.
    rolePolicyStatements = [
        {
            "Sid": "Stmt1500699052003",
            "Effect": "Allow",
            "Action": ["s3:ListBucket"],
            "Resource": [Join("", ["arn:aws:s3:::", Ref('S3Bucket')])]
        },
        {
            "Sid": "Stmt1500699052000",
            "Effect": "Allow",
            "Action": [
                "s3:PutObject", "s3:GetObject", "s3:DeleteObject"
            ],
            "Resource":
            [Join("", ["arn:aws:s3:::", Ref('S3Bucket'), '/Backup/*'])]
        },
        {
            "Sid": "Stmt1500612724002",
            "Effect": "Allow",
            "Action": [
                "kms:Encrypt", "kms:Decrypt", "kms:GenerateDataKey*"
            ],
            "Resource": [OpenEMRKeyARN]
        }
    ]
    if args.recovery:
        # Recovery also needs read access to the source stack's backups.
        rolePolicyStatements.extend([
            {
                "Sid": "Stmt1500699052004",
                "Effect": "Allow",
                "Action": ["s3:ListBucket"],
                "Resource":
                [Join("", ["arn:aws:s3:::", Ref('RecoveryS3Bucket')])]
            },
            {
                "Sid": "Stmt1500699052005",
                "Effect": "Allow",
                "Action": [
                    "s3:GetObject",
                ],
                "Resource": [
                    Join("", ["arn:aws:s3:::", Ref('RecoveryS3Bucket'),
                              '/Backup/*'])
                ]
            },
        ])

    t.add_resource(
        iam.ManagedPolicy(
            'WebserverPolicy',
            Description='Policy for webserver instance',
            PolicyDocument={
                "Version": "2012-10-17",
                "Statement": rolePolicyStatements
            }
        )
    )
    t.add_resource(
        iam.Role(
            'WebserverRole',
            AssumeRolePolicyDocument={
                "Version": "2012-10-17",
                "Statement": [{
                    "Effect": "Allow",
                    "Principal": {
                        "Service": ["ec2.amazonaws.com"]
                    },
                    "Action": ["sts:AssumeRole"]
                }]
            },
            Path='/',
            ManagedPolicyArns=[Ref('WebserverPolicy')]
        )
    )
    t.add_resource(
        iam.InstanceProfile(
            'WebserverInstanceProfile',
            Path='/',
            Roles=[Ref('WebserverRole')]
        )
    )

    # Encrypted data volume for the Docker site/database files.
    t.add_resource(
        ec2.Volume(
            'DockerVolume',
            DeletionPolicy='Delete' if args.dev else 'Snapshot',
            Size=Ref('PracticeStorage'),
            AvailabilityZone=Select("0", GetAZs("")),
            VolumeType='gp2',
            Encrypted=True,
            KmsKeyId=OpenEMRKeyID,
            Tags=Tags(Name="OpenEMR Practice")
        )
    )

    # User data: run cfn-init, then signal success/failure to the stack.
    bootstrapScript = [
        "#!/bin/bash -x\n",
        "exec > /var/log/openemr-cfn-bootstrap 2>&1\n",
        "cfn-init -v ",
        " --stack ", ref_stack_name,
        " --resource WebserverInstance ",
        " --configsets Setup ",
        " --region ", ref_region, "\n",
        "cfn-signal -e $? ",
        " --stack ", ref_stack_name,
        " --resource WebserverInstance ",
        " --region ", ref_region, "\n"
    ]
    setupScript = [
        "#!/bin/bash -xe\n",
        "exec > /tmp/cloud-setup.log 2>&1\n",
        "/root/openemr-devops/packages/standard/ami/ami-configure.sh\n"
    ]
    # Stack values handed through to the on-instance configure script.
    stackPassthroughFile = [
        "S3=", Ref('S3Bucket'), "\n",
        "KMS=", OpenEMRKeyID, "\n"
    ]
    if args.recovery:
        stackPassthroughFile.extend([
            "RECOVERYS3=", Ref('RecoveryS3Bucket'), "\n",
            "RECOVERY_NEWRDS=", GetAtt('RDSInstance', 'Endpoint.Address'),
            "\n",
        ])

    # NOTE(review): the YAML nesting below was reconstructed to standard
    # compose indentation (the source had been whitespace-mangled); confirm
    # against the upstream openemr-devops template.
    if args.recovery:
        # Recovery: container starts in manual-setup mode; the restore
        # process supplies the credentials.
        dockerComposeFile = [
            "version: '3.1'\n",
            "services:\n",
            "  openemr:\n",
            "    restart: always\n",
            "    image: openemr/openemr", docker_version, "\n",
            "    ports:\n",
            "    - 80:80\n",
            "    - 443:443\n",
            "    volumes:\n",
            "    - logvolume01:/var/log\n",
            "    - sitevolume:/var/www/localhost/htdocs/openemr/sites\n",
            "    environment:\n",
            "      MANUAL_SETUP: 1\n",
            "volumes:\n",
            "  logvolume01: {}\n",
            "  sitevolume: {}\n"
        ]
    else:
        # Standard install: point the container at the RDS endpoint and
        # inject DB/admin credentials from the stack parameters.
        # FIX: the three password lines had been corrupted (string literals
        # fused with their Ref(...) calls); restored to the same
        # "literal, Ref(...), literal" shape as the MYSQL_HOST line.
        dockerComposeFile = [
            "version: '3.1'\n",
            "services:\n",
            "  openemr:\n",
            "    restart: always\n",
            "    image: openemr/openemr", docker_version, "\n",
            "    ports:\n",
            "    - 80:80\n",
            "    - 443:443\n",
            "    volumes:\n",
            "    - logvolume01:/var/log\n",
            "    - sitevolume:/var/www/localhost/htdocs/openemr/sites\n",
            "    environment:\n",
            "      MYSQL_HOST: '", GetAtt('RDSInstance', 'Endpoint.Address'),
            "'\n",
            "      MYSQL_ROOT_USER: openemr\n",
            "      MYSQL_ROOT_PASS: '", Ref('RDSPassword'), "'\n",
            "      MYSQL_USER: openemr\n",
            "      MYSQL_PASS: '", Ref('RDSPassword'), "'\n",
            "      OE_USER: admin\n",
            "      OE_PASS: '", Ref('AdminPassword'), "'\n",
            "volumes:\n",
            "  logvolume01: {}\n",
            "  sitevolume: {}\n"
        ]

    # cfn-init config: write the setup script, stack variables and compose
    # file, then run the setup script.
    bootstrapInstall = cloudformation.InitConfig(
        files={
            "/root/cloud-setup.sh": {
                "content": Join("", setupScript),
                "mode": "000500",
                "owner": "root",
                "group": "root"
            },
            "/root/cloud-variables": {
                "content": Join("", stackPassthroughFile),
                "mode": "000500",
                "owner": "root",
                "group": "root"
            },
            "/root/openemr-devops/packages/standard/docker-compose.yaml": {
                "content": Join("", dockerComposeFile),
                "mode": "000500",
                "owner": "root",
                "group": "root"
            }
        },
        commands={
            "01_setup": {
                "command": "/root/cloud-setup.sh"
            }
        }
    )
    bootstrapMetadata = cloudformation.Metadata(
        cloudformation.Init(
            cloudformation.InitConfigSets(
                Setup=['Install']
            ),
            Install=bootstrapInstall
        )
    )

    t.add_resource(
        ec2.Instance(
            'WebserverInstance',
            Metadata=bootstrapMetadata,
            ImageId=FindInMap('RegionData', ref_region, 'OpenEMRMktPlaceAMI'),
            InstanceType=Ref('WebserverInstanceSize'),
            NetworkInterfaces=[ec2.NetworkInterfaceProperty(
                AssociatePublicIpAddress=True,
                DeviceIndex="0",
                GroupSet=[Ref('ApplicationSecurityGroup'),
                          Ref('WebserverIngressSG'),
                          Ref('SysAdminAccessSG')],
                SubnetId=Ref('PublicSubnet1')
            )],
            KeyName=Ref('EC2KeyPair'),
            IamInstanceProfile=Ref('WebserverInstanceProfile'),
            Volumes=[{
                "Device": "/dev/sdd",
                "VolumeId": Ref('DockerVolume')
            }],
            Tags=Tags(Name='OpenEMR Cloud Standard'),
            InstanceInitiatedShutdownBehavior='stop',
            UserData=Base64(Join('', bootstrapScript)),
            # Stack waits up to 15 minutes for the cfn-signal in user data.
            CreationPolicy={
                "ResourceSignal": {
                    "Timeout": "PT15M"
                }
            }
        )
    )
    return t
"Effect": "Allow" }, ] }, Roles=[Ref(TaskExecutionRole)], )) # Create Security group that allows traffic into the ALB ALBSecurityGroup = t.add_resource( ec2.SecurityGroup("ALBSecurityGroup", GroupDescription="ALB Security Group", VpcId=Ref(VPC), SecurityGroupIngress=[ ec2.SecurityGroupRule( IpProtocol="tcp", FromPort="80", ToPort="80", CidrIp="0.0.0.0/0", ) ])) # Create Security group for the Fargate tasks that allows 80 from the ALB TaskSecurityGroup = t.add_resource( ec2.SecurityGroup("TaskSecurityGroup", GroupDescription="Task Security Group", VpcId=Ref(VPC), SecurityGroupIngress=[ ec2.SecurityGroupRule(IpProtocol="tcp", FromPort="80", ToPort="80",