def gen_postgis_function():
    """Build the Lambda function resource that provisions PostGIS.

    Returns a troposphere ``Function`` placed in the two private subnets
    exported (comma-separated) by the network stack.
    """
    # Both subnet ids come from the same CSV export; resolve the Split once.
    private_subnets = Split(
        ",",
        ImportValue(Sub("${NetworkName}-network-vpc-PrivateSubnets")),
    )
    return Function(
        "PostGisProvisionerFunction",
        Code=Code(
            S3Bucket=Ref("BucketName"),
            S3Key=Ref("BucketKey"),
        ),
        FunctionName=Sub("${AWS::StackName}-PostGisProvisioner"),
        Handler="postgis_provisioner.lambda_handler",
        Role=GetAtt("PostgisProvisionerExecutionRole", "Arn"),
        Timeout="60",
        # NOTE(review): python3.6 is a deprecated Lambda runtime — left
        # unchanged here as changing it alters deployed behavior.
        Runtime="python3.6",
        VpcConfig=VPCConfig(
            SecurityGroupIds=[Ref("PostGisProvisionerSg")],
            SubnetIds=[
                Select(0, private_subnets),
                Select(1, private_subnets),
            ],
        ),
    )
def test_split(self):
    """Split serialises to Fn::Split and rejects non-string delimiters."""
    delimiter = ','
    source_string = ('{ "Fn::ImportValue": { "Fn::Sub": '
                     '"${VpcStack}-PublicSubnets" }')
    raw = Split(delimiter, source_string)
    actual = raw.to_dict()
    expected = (
        {'Fn::Split': [',', '{ "Fn::ImportValue": { '
                       '"Fn::Sub": "${VpcStack}-PublicSubnets" }']}
    )
    self.assertEqual(expected, actual)
    # BUG FIX: this previously asserted on Join(); the delimiter validation
    # of the function under test (Split) is what belongs in test_split.
    with self.assertRaises(ValueError):
        Split(10, "foobar")
def add_dns_entries(self, template):
    """
    Method to add CloudMap service and record for DNS resolution.

    :param template: troposphere Template the service-discovery resources
        are added to.
    """
    sd_entry = SdService(
        f"{self.title.title()}ServiceDiscovery",
        template=template,
        DependsOn=[self.service.title],
        Description=Sub(
            f"Record for VirtualService {self.title} in mesh ${{{self.service.title}.MeshName}}"
        ),
        NamespaceId=Ref(PRIVATE_DNS_ZONE_ID),
        DnsConfig=SdDnsConfig(
            RoutingPolicy="MULTIVALUE",
            NamespaceId=Ref(AWS_NO_VALUE),
            DnsRecords=[SdDnsRecord(TTL="30", Type="A")],
        ),
        # The CloudMap service name is the first dot-separated label of the
        # mesh VirtualServiceName.
        Name=Select(0, Split(".", GetAtt(self.service, "VirtualServiceName"))),
    )
    # "Fake" instance registration so the record exists even with no real
    # task registered against the service.
    SdInstance(
        f"{self.title.title()}ServiceDiscoveryFakeInstance",
        template=template,
        # FIX: was a pointless f-string with no placeholders (lint F541).
        InstanceAttributes={"AWS_INSTANCE_IPV4": "169.254.255.254"},
        ServiceId=Ref(sd_entry),
    )
def _wait_condition_data_to_s3_url(condition: cloudformation.WaitCondition, artifacts_bucket: s3.Bucket) -> Sub:
    """Build a CloudFormation ``Sub`` structure that resolves to the S3 key
    reported to a wait condition.

    :param condition: Wait condition to reference
    :param artifacts_bucket: Bucket to reference
    """
    # The wait condition's Data attribute is a JSON blob; token 3 of a
    # double-quote split is the reported key value.
    reported_key = Select(3, Split('"', condition.get_att("Data")))
    url_template = f"https://${{{artifacts_bucket.title}.DomainName}}/${{key}}"
    return Sub(url_template, {"key": reported_key})
def create_composite_alarm(alarm: Alarm, alarms: list[Alarm]) -> None:
    """
    Function to create the composite alarms
    """
    # The rule expression comes either from Properties or MacroParameters.
    if alarm.properties and keyisset("AlarmRule", alarm.properties):
        expression = alarm.properties["AlarmRule"]
    elif alarm.parameters and keyisset("CompositeExpression", alarm.parameters):
        expression = alarm.parameters["CompositeExpression"]
    else:
        raise KeyError(
            "Either Properties.AlarmRule or MacroParameters.CompositeExpression must be set",
            alarm.properties,
            alarm.parameters,
        )
    mapped_alarms = map_expression_to_alarms(expression, alarms)
    rule = create_composite_alarm_expression(mapped_alarms, expression)
    # Stack-id ARN -> resource segment -> 5th dash token: the unique suffix.
    stack_suffix = Select(4, Split("-", Select(2, Split("/", Ref(AWS_STACK_ID)))))
    joined_titles = "".join(a.title for a in mapped_alarms.values())
    name_template = f"${{{AWS_REGION}}}-${{StackId}}-CompositeAlarmFor" + joined_titles
    # Keep the alarm name within the CloudFormation/CloudWatch length budget.
    max_len = 254 - 12
    if len(name_template) > max_len:
        name_template = name_template[:max_len]
    if alarm.properties:
        props = import_record_properties(alarm.properties, CompositeAlarm)
    else:
        props = {"ActionsEnabled": True}
    props["AlarmRule"] = rule
    props["AlarmName"] = Sub(name_template, StackId=stack_suffix)
    alarm.properties = props
    alarm.cfn_resource = CompositeAlarm(
        alarm.logical_name,
        DependsOn=[a.title for a in mapped_alarms.values()],
        **props,
    )
def gen_rds_db(service_name):
    """Return the RDS instance and its subnet group for *service_name*."""
    # The network stack exports the private subnets as one CSV string.
    private_subnets = Split(
        ",",
        ImportValue(Sub("${NetworkName}-network-vpc-PrivateSubnets")),
    )
    db_subnet_group = DBSubnetGroup(
        "DBSubnetGroup",
        DBSubnetGroupDescription="Subnets available for the RDS DB Instance",
        SubnetIds=[Select(0, private_subnets), Select(1, private_subnets)],
    )
    db = DBInstance(
        "DB",
        DBName=Ref(parameters['DBName']),
        AllocatedStorage=Ref(parameters['DBStorage']),
        DBInstanceClass=Ref(parameters['DBClass']),
        DBInstanceIdentifier=service_name,
        VPCSecurityGroups=[Ref('DBSecurityGroup')],
        Engine=Ref(parameters['DBEngine']),
        EngineVersion=Ref(parameters['DBEngineVersion']),
        StorageType=Ref(parameters['DBStorageType']),
        Iops=Ref(parameters['Iops']),
        MasterUsername=Ref(parameters['Username']),
        MasterUserPassword=Ref(parameters['Password']),
        MultiAZ=Ref(parameters['MultiAZ']),
        PubliclyAccessible=Ref(parameters['PubliclyAccessible']),
        DBSubnetGroupName=Ref("DBSubnetGroup"),
        Tags=gen_tags(service_name),
    )
    return [db, db_subnet_group]
action="store_true") args = parser.parse_args() t = Template() t.add_version('2010-09-09') descString = 'OpenEMR Cloud Standard v5.0.2-3 cloud deployment' if (args.dev): descString += ' [developer]' if (args.recovery): descString += ' [recovery]' t.add_description(descString) # reduce to consistent names if (args.recovery): OpenEMRKeyID = Select('1', Split('/', Ref('RecoveryKMSKey'))) OpenEMRKeyARN = Ref('RecoveryKMSKey') else: OpenEMRKeyID = Ref('OpenEMRKey') OpenEMRKeyARN = GetAtt('OpenEMRKey', 'Arn') if (args.recovery): setRecoveryInputs(t, args) else: setInputs(t, args) setMappings(t, args) buildVPC(t, args) buildInfrastructure(t, args) buildMySQL(t, args) buildInstance(t, args)
Statement(Effect=Allow, Action=[AssumeRole], Principal=Principal("Service", ["ecs.amazonaws.com"])) ]), Path="/", ManagedPolicyArns=[ 'arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceRole' ])) t.add_resource( ecs.Service( "service", Cluster=ImportValue( Join( "-", [Select(0, Split("-", Ref("AWS::StackName"))), "cluster-id"])), DesiredCount=1, TaskDefinition=Ref("task"), LoadBalancers=[ ecs.LoadBalancer( ContainerName="helloworld", ContainerPort=3000, TargetGroupArn=ImportValue( Join("-", [ Select(0, Split("-", Ref("AWS::StackName"))), "alb-target-group" ]), ), ) ], Role=Ref("ServiceRole")))
### CloudfrontDistribution = t.add_resource( cloudfront.Distribution( "CloudfrontDistribution", DistributionConfig=cloudfront.DistributionConfig( Aliases=[CONFIG['DOMAIN_NAME']], Origins=[ cloudfront.Origin( Id="Origin 1", # turn `http://mybucket.s3-website-us-east-1.amazonaws.com/`y # into `mybucket.s3-website-us-east-1.amazonaws.com` DomainName=Select( 2, Split("/", GetAtt(StaticHostingPublicBucket, 'WebsiteURL'))), # S3 website hosting only serves on 80 CustomOriginConfig=cloudfront.CustomOriginConfig( HTTPPort=80, OriginProtocolPolicy='http-only', )) ], ViewerCertificate=cloudfront.ViewerCertificate( AcmCertificateArn=Ref(CloudFrontCertificate), SslSupportMethod='sni-only', ), DefaultCacheBehavior=cloudfront.DefaultCacheBehavior( TargetOriginId="Origin 1", ForwardedValues=cloudfront.ForwardedValues(QueryString=False),
Ref('AWS::Region'), "\n"])), ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"), KeyName=Ref("KeyPair"), SecurityGroups=[Ref("SecurityGroup")], IamInstanceProfile=Ref('EC2InstanceProfile'), InstanceType=instanceSize, AssociatePublicIpAddress='true', )) t.add_resource(AutoScalingGroup( 'ECSAutoScalingGroup', DesiredCapacity=desiredCapacity, MinSize=minCapacity, MaxSize=maxCapacity, VPCZoneIdentifier=Split(",", Ref("PublicSubnet")), LaunchConfigurationName=Ref('ContainerInstances'), )) states = { "High": { "threshold": ScaleUpLevel, "alarmPrefix": "ScaleUpPolicyFor", "operator": "GreaterThanThreshold", "adjustment": "1" }, "Low": { "threshold": ScaleDownLevel, "alarmPrefix": "ScaleDownPolicyFor", "operator": "LessThanThreshold", "adjustment": "-1"
), ec2.SecurityGroupRule( IpProtocol="tcp", FromPort="80", ToPort="80", CidrIp="104.129.192.0/23", ), ], )) # Add the LB using our SG and user-defined subnets ALBResource = t.add_resource( elb.LoadBalancer( "{}LoadBalancer".format(e), Scheme="internet-facing", Subnets=Split( ',', ImportValue(Join("-", [e, "cluster-public-subnets"]))), SecurityGroups=[Ref("{}ELBSecurityGroup".format(e))], )) # Run a for-loop to create target groups for each service for s in services: t.add_resource( elb.TargetGroup( #"TargetGroup", "{}{}TargetGroup".format(e, s), Name=Join("-", [e, s, "TG"]), DependsOn="{}LoadBalancer".format(e), HealthCheckIntervalSeconds="20", HealthCheckProtocol="HTTP", HealthCheckTimeoutSeconds="15", HealthyThresholdCount="5",
t.add_resource( TaskDefinition( "task", ContainerDefinitions=[ ContainerDefinition( Image=Join("", [ Ref("AWS::AccountId"), ".dkr.ecr.", Ref("AWS::Region"), ".amazonaws.com", "/", ImportValue("community-mother-api-dev-repo"), ":", Ref("Tag") ]), Memory=957, Cpu=2048, Name=Join("-", [ Select(0, Split("-", Ref("AWS::StackName"))), Select(1, Split("-", Ref("AWS::StackName"))), Select(2, Split("-", Ref("AWS::StackName"))) ]), Environment=[ Environment(Name="application_env", Value=Ref("ApplicationEnv")) ], PortMappings=[ecs.PortMapping(ContainerPort=80)]) ], )) t.add_resource( Role( "ServiceRole", AssumeRolePolicyDocument=Policy(Statement=[
'SecurityGroups': 'Security Groups', 'KeyName': 'SSH Key Name', 'InstanceType': 'Instance Type', 'EnvType': 'test', 'ScaleCapacity': 'Number of api servers to run', 'Subnets': 'ASG Subnets' } for p in params.keys(): vars()[p] = t.add_parameter( Parameter(p, Type="String", Description=params[p])) LaunchConfig = t.add_resource( LaunchConfiguration("LaunchConfiguration", ImageId=Ref(AmiId), SecurityGroups=Split(',', Ref(SecurityGroups)), KeyName=Ref(KeyName), InstanceType=Ref(InstanceType))) t.add_resource( AutoScalingGroup( "AutoscalingGroup", Tags=[ Tag("Environment", Ref(EnvType), True), Tag("Name", Ref(InstanceName), True) ], DesiredCapacity=Ref(ScaleCapacity), LaunchConfigurationName=Ref(LaunchConfig), MinSize=Ref(ScaleCapacity), MaxSize=Ref(ScaleCapacity), VPCZoneIdentifier=Split(',', Ref(Subnets)),
Effect=Allow, Action=[AssumeRole], Principal=Principal("Service", ["ecs.amazonaws.com"]) ) ] ), Path="/", ManagedPolicyArns=['arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceRole'] )) t.add_resource(ecs.Service( "service", Cluster=ImportValue( Join( "-", [Select(0, Split("-", Ref("AWS::StackName"))),"cluster-id"] ) ), DesiredCount=1, TaskDefinition=Ref("task"), LoadBalancers=[ecs.LoadBalancer( ContainerName="helloworld", ContainerPort=3000, TargetGroupArn=ImportValue( Join( "-", [Select(0, Split("-", Ref("AWS::StackName"))),"alb-target-group"] ), ), )], Role=Ref("ServiceRole")
DomainName=cdn_domain, DependsOn=redirectBucket, SubjectAlternativeNames=[alternate_name], DomainValidationOptions=[ DomainValidationOption(DomainName=cdn_domain, ValidationDomain=dns_domain) ], ValidationMethod='DNS', Tags=DefaultTags + Tags(Name='{}-{}'.format(env_l, app_group_l)))) # Provision the CDN Origin cdnOrigin = cf.Origin(Id='{}-{}-{}'.format(env_l, app_group_l, src_domain), DomainName=Select( 1, Split('//', GetAtt(redirectBucket, 'WebsiteURL'))), CustomOriginConfig=cf.CustomOriginConfig( HTTPPort=80, HTTPSPort=443, OriginProtocolPolicy='http-only', OriginSSLProtocols=['TLSv1.2'], )) # Provision the CDN Distribution cdnDistribution = t.add_resource( cf.Distribution( 'cdnDistribution{}'.format(src_domain.replace('.', '0')), DependsOn='cdnCertificate{}'.format(src_domain.replace('.', '0')), DistributionConfig=cf.DistributionConfig( Comment='{} - {}'.format(env, cdn_domain), Enabled=True,
'chmod +x ./install\n', 'sudo ./install auto\n', ])))) loadbalancer = [] targetgroup = [] if "elb" in rolemap[role]: elb_identifier = "" if rolemap[role]["elb"]["subnet"] in public_prefixes: elb_identifier = "pubsub" + rolemap[role]["elb"]["subnet"].upper() elb = t.add_resource( LoadBalancer( "elb" + role.upper(), Subnets=Split(",", Ref(elb_identifier)), Listeners=[ Listener( LoadBalancerPort=80, InstancePort=80, Protocol='HTTP', ), ], SecurityGroups=[Ref("defaultSG"), Ref(elbSecurityGroup)], HealthCheck=HealthCheck( Target=rolemap[role]["elb"]["healthcheck"], HealthyThreshold="2", UnhealthyThreshold="2", Interval="10", Timeout="5"),
def add_resources(self): self.runner_ssm_role = self.template.add_resource( Role( "RunnerSsmRole", Path="/", ManagedPolicyArns=[ "arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforSSM" ], AssumeRolePolicyDocument=aws.Policy(Statement=[ Statement( Action=[sts.AssumeRole], Effect=Allow, Principal=Principal("Service", ["ec2.amazonaws.com"]), ) ]), )) self.runner_ssm_instanceprofile = self.template.add_resource( InstanceProfile("RunnerSsmInstanceProfile", Path="/", Roles=[Ref(self.runner_ssm_role)])) self.runner_launch_config = self.template.add_resource( LaunchConfiguration( "RunnerLaunchConfiguration", UserData=Base64( Join( "", [ "#!/bin/bash\n", "#####install ssm######\n", "yum install -y amazon-ssm-agent\n", "systemctl enable amazon-ssm-agent\n", "systemctl start amazon-ssm-agent\n", "####install docker####\n", "yum install -y docker\n", "systemctl enable docker\n", "systemctl start docker\n", "####install runner####\n", "yum install -y wget\n", "wget -O /usr/local/bin/gitlab-runner ", "https://gitlab-runner-downloads.s3.amazonaws.com/v", Ref(self.runner_version), "/binaries/gitlab-runner-linux-amd64\n", "ln -s /usr/local/bin/gitlab-runner ", "/usr/bin/gitlab-runner\n", "chmod +x /usr/local/bin/gitlab-runner\n", "useradd --comment 'GitLab Runner' ", "--create-home gitlab-runner --shell /bin/bash\n", "/usr/local/bin/gitlab-runner install ", "--user=gitlab-runner " "--working-directory=/home/gitlab-runner\n", "systemctl enable gitlab-runner\n", "systemctl start gitlab-runner\n", "####register runner####\n", "gitlab-runner register ", "--config=/etc/gitlab-runner/config.toml ", "--request-concurrency=", Ref(self.runner_job_concurrency), " ", "--tag-list=", Ref(self.runner_tag_list), " ", "--non-interactive ", "--registration-token=", Ref(self.runner_register_token), " ", "--run-untagged=true ", "--locked=false ", "--url=", Ref(self.runner_gitlab_url), " ", "--executor=docker ", "--docker-image=alpine:latest ", "--docker-privileged=true\n", "####create unregister 
script####\n", "TOKEN=$(gitlab-runner list 2>&1 | grep Executor | ", "awk '{ print $4 }' | awk -F= '{ print $2 }')\n", "URL=$(gitlab-runner list 2>&1 | grep Executor | ", "awk '{ print $5 }' | awk -F= '{ print $2 }')\n", "echo gitlab-runner unregister ", "--url $URL --token $TOKEN > /unregister.sh\n", "chmod +x /unregister.sh", ], )), ImageId=Ref(self.runner_ami_id), KeyName=Ref(self.runner_key_pair), BlockDeviceMappings=[ BlockDeviceMapping( DeviceName="/dev/xvda", Ebs=EBSBlockDevice( VolumeSize=Ref(self.runner_volume_size)), ) ], SecurityGroups=[Ref(self.runner_security_group)], InstanceType=Ref(self.runner_server_instance_type), IamInstanceProfile=GetAtt(self.runner_ssm_instanceprofile, "Arn"), )) self.runner_autoscaling_group = self.template.add_resource( AutoScalingGroup( "RunnerAutoscalingGroup", DesiredCapacity=Ref(self.runner_desired_count), LaunchConfigurationName=Ref(self.runner_launch_config), MinSize=Ref(self.runner_min_count), MaxSize=Ref(self.runner_max_count), VPCZoneIdentifier=Split(",", Ref(self.runner_subnets)), Tags=[Tag("Name", "gitlab-runner-created-by-asg", True)], ))
############## t.add_parameter( Parameter("RepoName", Type="String", Description="Name of the CodeCommit repository to source")) ############# # Resources # ############# ### ECR #### # Create the resource t.add_resource( Repository("Repository", RepositoryName=Select(0, Split("-", Ref("AWS::StackName"))))) # Define the stack output t.add_output( Output( "Repository", Description="ECR repository", Value=Select(0, Split("-", Ref("AWS::StackName"))), Export=Export(Join("-", [Ref("RepoName"), "repo"])), )) #### CodeBuild #### t.add_resource( Role("ServiceRole", AssumeRolePolicyDocument=Policy(Statement=[
def buildInfrastructure(t, args):
    """Add shared infrastructure (KMS key, CloudTrail bucket + trail, app SG) to *t*."""
    # NOTE(review): original formatting was lost; only the KMS key is assumed
    # to sit inside (not args.recovery) — recovery mode reuses an existing key.
    # TODO confirm against the original file.
    if (not args.recovery):
        t.add_resource(
            kms.Key(
                'OpenEMRKey',
                # NOTE(review): args.recovery is always False in this branch,
                # so this reduces to 'Delete' if args.dev else 'Retain'.
                DeletionPolicy='Retain'
                if args.recovery else 'Delete' if args.dev else 'Retain',
                KeyPolicy={
                    "Version": "2012-10-17",
                    "Id": "key-default-1",
                    "Statement": [{
                        "Sid": "1",
                        "Effect": "Allow",
                        "Principal": {
                            "AWS":
                            [Join(':', ['arn:aws:iam:', ref_account, 'root'])]
                        },
                        "Action": "kms:*",
                        "Resource": "*"
                    }]
                }))
    # Bucket name embeds the stack id's unique suffix (3rd '/'-token of the ARN).
    t.add_resource(
        s3.Bucket(
            'S3Bucket',
            DeletionPolicy='Retain',
            BucketName=Join(
                '-', ['openemr', Select('2', Split('/', ref_stack_id))])))
    # Allows CloudTrail to check the bucket ACL and write log objects.
    t.add_resource(
        s3.BucketPolicy(
            'BucketPolicy',
            Bucket=Ref('S3Bucket'),
            PolicyDocument={
                "Version": "2012-10-17",
                "Statement": [{
                    "Sid": "AWSCloudTrailAclCheck",
                    "Effect": "Allow",
                    "Principal": {
                        "Service": "cloudtrail.amazonaws.com"
                    },
                    "Action": "s3:GetBucketAcl",
                    "Resource": {
                        "Fn::Join": ["", ["arn:aws:s3:::", {
                            "Ref": "S3Bucket"
                        }]]
                    }
                }, {
                    "Sid": "AWSCloudTrailWrite",
                    "Effect": "Allow",
                    "Principal": {
                        "Service": "cloudtrail.amazonaws.com"
                    },
                    "Action": "s3:PutObject",
                    "Resource": {
                        "Fn::Join": [
                            "",
                            [
                                "arn:aws:s3:::", {
                                    "Ref": "S3Bucket"
                                }, "/AWSLogs/", {
                                    "Ref": "AWS::AccountId"
                                }, "/*"
                            ]
                        ]
                    },
                    "Condition": {
                        "StringEquals": {
                            "s3:x-amz-acl": "bucket-owner-full-control"
                        }
                    }
                }]
            }))
    t.add_resource(
        cloudtrail.Trail('CloudTrail',
                         DependsOn='BucketPolicy',
                         IsLogging=True,
                         IncludeGlobalServiceEvents=True,
                         IsMultiRegionTrail=True,
                         S3BucketName=Ref('S3Bucket')))
    t.add_resource(
        ec2.SecurityGroup('ApplicationSecurityGroup',
                          GroupDescription='Application Security Group',
                          VpcId=Ref('VPC'),
                          Tags=Tags(Name='Application')))
    return t
} FARGATE_MODES_VALUES = [] for cpu in FARGATE_MODES.keys(): for ram in FARGATE_MODES[cpu]: FARGATE_MODES_VALUES.append(f"{cpu}!{ram}") FARGATE_CPU_RAM_CONFIG_T = "FargateCpuRamConfiguration" FARGATE_CPU_RAM_CONFIG = Parameter( FARGATE_CPU_RAM_CONFIG_T, Type="String", AllowedValues=FARGATE_MODES_VALUES, Default="256!512", ) FARGATE_CPU = Select(0, Split("!", Ref(FARGATE_CPU_RAM_CONFIG))) FARGATE_RAM = Select(1, Split("!", Ref(FARGATE_CPU_RAM_CONFIG))) CLUSTER_SG_ID_T = "ClusterWideSGId" CLUSTER_SG_ID = Parameter( CLUSTER_SG_ID_T, Type="String", Default="none", AllowedPattern=r"(none|^sg-[a-z0-9]+$)", ) SERVICE_GROUP_ID_T = "ServiceGroupId" SERVICE_GROUP_ID = Parameter(SERVICE_GROUP_ID_T, Type=SG_ID_TYPE, Default="<none>") AWS_XRAY_IMAGE = "amazon/aws-xray-daemon" XRAY_IMAGE_T = "AWSXRayImage"
rds_instance = t.add_resource( rds.DBInstance( 'RdsInstance', # DeletionPolicy=Retain, # DBName=Ref(param_dbname), DBSnapshotIdentifier=If('UseSnapshotCondition', Ref(param_db_snapshot), Ref(AWS_NO_VALUE)), MasterUsername=If('NewDatabaseCondition', Ref(param_db_user), Ref(AWS_NO_VALUE)), MasterUserPassword=If('NewDatabaseCondition', Ref(param_db_password), Ref(AWS_NO_VALUE)), Engine=Ref(param_db_engine), LicenseModel=If('OrcaleCondition', 'license-included', Ref(AWS_NO_VALUE)), EngineVersion=Select(1, Split('-', Ref(param_db_engine_version))), AllowMajorVersionUpgrade=False, AutoMinorVersionUpgrade=True, DBInstanceClass=Ref(param_db_class), MultiAZ=Ref(param_db_multi_az), StorageType=Ref(param_db_stroage_type), AllocatedStorage=Ref(param_db_storage_size), Iops=If('IopsStorageCondition', Ref(param_db_storage_iops), Ref(AWS_NO_VALUE)), StorageEncrypted=Ref(param_db_storage_encrypted), KmsKeyId=If( 'StorageEncryptedConditon', If('DefaultKmsCondition', Ref(AWS_NO_VALUE), Ref(param_db_kms_key)), Ref(AWS_NO_VALUE), ),
# CloudFormation condition names (_T) and bodies for the RDS module.
from troposphere import And, Condition, Equals, Not, Or, Ref, Select, Split

from ecs_composex.rds.rds_params import DB_ENGINE_NAME, DB_SNAPSHOT_ID, DBS_SUBNET_GROUP

# NOTE(review): `Or` is imported but unused in this chunk — possibly used
# further down the original file; left in place.

# True when the subnet-group parameter was left at its default, i.e. a new
# DBSubnetGroup must be created by this stack.
DBS_SUBNET_GROUP_CON_T = "CreateSubnetGroupCondition"
DBS_SUBNET_GROUP_CON = Equals(Ref(DBS_SUBNET_GROUP), DBS_SUBNET_GROUP.Default)

# Snapshot parameter at its default -> create the database from scratch.
NOT_USE_DB_SNAPSHOT_CON_T = "NotUseSnapshotToCreateDbCondition"
NOT_USE_DB_SNAPSHOT_CON = Equals(Ref(DB_SNAPSHOT_ID), DB_SNAPSHOT_ID.Default)

USE_DB_SNAPSHOT_CON_T = "UseSnapshotToCreateDbCondition"
USE_DB_SNAPSHOT_CON = Not(Condition(NOT_USE_DB_SNAPSHOT_CON_T))

# Aurora engines are named "aurora[-...]"; the first dash token identifies them.
USE_CLUSTER_CON_T = "UseAuroraClusterCondition"
USE_CLUSTER_CON = Equals("aurora", Select(0, Split("-", Ref(DB_ENGINE_NAME))))

NOT_USE_CLUSTER_CON_T = "NotUseClusterCondition"
NOT_USE_CLUSTER_CON = Not(Condition(USE_CLUSTER_CON_T))

# Compound conditions combining cluster-vs-instance with snapshot usage.
USE_CLUSTER_AND_SNAPSHOT_CON_T = "UseClusterAndSnapshotCondition"
USE_CLUSTER_AND_SNAPSHOT_CON = And(Condition(USE_CLUSTER_CON_T),
                                   Condition(USE_DB_SNAPSHOT_CON_T))

USE_CLUSTER_NOT_SNAPSHOT_CON_T = "UseClusterAndNotSnapshotCondition"
USE_CLUSTER_NOT_SNAPSHOT_CON = And(Condition(USE_CLUSTER_CON_T),
                                   Condition(NOT_USE_DB_SNAPSHOT_CON_T))

NOT_USE_CLUSTER_USE_SNAPSHOT_CON_T = "NotUseClusterButUseSnapshotCondition"
NOT_USE_CLUSTER_USE_SNAPSHOT_CON = And(Condition(NOT_USE_CLUSTER_CON_T),
                                       Condition(USE_DB_SNAPSHOT_CON_T))
def buildInfrastructure(t, args):
    """Add core infrastructure to *t*: a /16 VPC with one public subnet and
    internet routing, a KMS key, and an S3 bucket with a CloudTrail trail."""
    t.add_resource(
        ec2.VPC('VPC',
                CidrBlock='10.0.0.0/16',
                EnableDnsSupport='true',
                EnableDnsHostnames='true'))
    # Single public subnet in the first AZ of the current region.
    t.add_resource(
        ec2.Subnet('PublicSubnet1',
                   VpcId=Ref('VPC'),
                   CidrBlock='10.0.1.0/24',
                   AvailabilityZone=Select("0", GetAZs(""))))
    t.add_resource(ec2.InternetGateway('ig'))
    t.add_resource(
        ec2.VPCGatewayAttachment('igAttach',
                                 VpcId=Ref('VPC'),
                                 InternetGatewayId=Ref('ig')))
    t.add_resource(ec2.RouteTable('rtTablePublic', VpcId=Ref('VPC')))
    # Default route to the internet; must wait for the IGW attachment.
    t.add_resource(
        ec2.Route('rtPublic',
                  RouteTableId=Ref('rtTablePublic'),
                  DestinationCidrBlock='0.0.0.0/0',
                  GatewayId=Ref('ig'),
                  DependsOn='igAttach'))
    t.add_resource(
        ec2.SubnetRouteTableAssociation('rtPublic1Attach',
                                        SubnetId=Ref('PublicSubnet1'),
                                        RouteTableId=Ref('rtTablePublic')))
    # Account-root-only KMS key, deleted along with the stack.
    t.add_resource(
        kms.Key('OpenEMRKey',
                DeletionPolicy='Delete',
                KeyPolicy={
                    "Version": "2012-10-17",
                    "Id": "key-default-1",
                    "Statement": [{
                        "Sid": "1",
                        "Effect": "Allow",
                        "Principal": {
                            "AWS":
                            [Join(':', ['arn:aws:iam:', ref_account, 'root'])]
                        },
                        "Action": "kms:*",
                        "Resource": "*"
                    }]
                }))
    # Bucket name embeds the stack id's unique suffix (3rd '/'-token of the ARN).
    t.add_resource(
        s3.Bucket(
            'S3Bucket',
            DeletionPolicy='Retain',
            BucketName=Join(
                '-', ['openemr', Select('2', Split('/', ref_stack_id))])))
    # Allows CloudTrail to check the bucket ACL and write log objects.
    t.add_resource(
        s3.BucketPolicy(
            'BucketPolicy',
            Bucket=Ref('S3Bucket'),
            PolicyDocument={
                "Version": "2012-10-17",
                "Statement": [{
                    "Sid": "AWSCloudTrailAclCheck",
                    "Effect": "Allow",
                    "Principal": {
                        "Service": "cloudtrail.amazonaws.com"
                    },
                    "Action": "s3:GetBucketAcl",
                    "Resource": {
                        "Fn::Join": ["", ["arn:aws:s3:::", {
                            "Ref": "S3Bucket"
                        }]]
                    }
                }, {
                    "Sid": "AWSCloudTrailWrite",
                    "Effect": "Allow",
                    "Principal": {
                        "Service": "cloudtrail.amazonaws.com"
                    },
                    "Action": "s3:PutObject",
                    "Resource": {
                        "Fn::Join": [
                            "",
                            [
                                "arn:aws:s3:::", {
                                    "Ref": "S3Bucket"
                                }, "/AWSLogs/", {
                                    "Ref": "AWS::AccountId"
                                }, "/*"
                            ]
                        ]
                    },
                    "Condition": {
                        "StringEquals": {
                            "s3:x-amz-acl": "bucket-owner-full-control"
                        }
                    }
                }]
            }))
    t.add_resource(
        cloudtrail.Trail('CloudTrail',
                         DependsOn='BucketPolicy',
                         IsLogging=True,
                         IncludeGlobalServiceEvents=True,
                         IsMultiRegionTrail=True,
                         S3BucketName=Ref('S3Bucket')))
    return t
from troposphere import Template, Parameter, Ref, Select, Split
import troposphere.ec2 as ec2

# Minimal template: one VPC plus two subnets carved out of a CSV parameter.
t = Template()

p_vpc_cidr = t.add_parameter(
    Parameter('VpcCidr', Type='String', Default='10.0.0.0/24'))
p_subnets_cidr = t.add_parameter(
    Parameter('Subnets', Type='String', Default='10.0.0.0/25,10.0.0.128/25'))

r_vpc = t.add_resource(ec2.VPC('VPC', CidrBlock=Ref(p_vpc_cidr)))

# Each subnet takes the i-th CIDR from the comma-separated parameter.
subnet_cidrs = Split(',', Ref(p_subnets_cidr))
t.add_resource([
    ec2.Subnet(
        name,
        CidrBlock=Select(index, subnet_cidrs),
        VpcId=Ref(r_vpc),
        AvailabilityZone=zone,
    )
    for index, (name, zone) in enumerate([('SubnetAz1', 'eu-west-1a'),
                                          ('SubnetAz2', 'eu-west-1b')])
])

print(t.to_yaml())
from troposphere import elasticloadbalancingv2 as elb from troposphere import (Export, GetAtt, ImportValue, Join, Output, Ref, Select, Split, Sub, Template, ec2) t = Template() t.add_description("Effective DevOps in AWS: ALB for the ECS Cluster") t.add_resource( ec2.SecurityGroup( "LoadBalancerSecurityGroup", GroupDescription="Web load balancer security group.", VpcId=ImportValue( Join("-", [ Select(0, Split("-", Ref("AWS::StackName"))), "cluster-vpc-id" ])), SecurityGroupIngress=[ ec2.SecurityGroupRule( IpProtocol="tcp", FromPort="3000", ToPort="3000", CidrIp="0.0.0.0/0", ), ], )) t.add_resource( elb.LoadBalancer( "LoadBalancer", Scheme="internet-facing",
import warnings
from typing import Iterable, Tuple

from troposphere import AWSObject, Tags
from troposphere import Sub, ImportValue, Ref, GetAtt, Split
from troposphere.cloudformation import AWSCustomObject
# NOTE(review): `awsλ` is a non-ASCII alias (PEP 8 discourages these); left
# unchanged because uses may exist outside this chunk.
from troposphere import awslambda as awsλ, iam, ec2

import awacs.awslambda
import awacs.sts
import yaml

from . import policy, secret
from .common import Config

# Networking handles imported from the per-stage stack exports.
VPC = ImportValue(Sub('${Stage}-VPC'))
PUBLIC_SUBNETS = Split(',', ImportValue(Sub('${Stage}-PublicSubnets')))
PRIVATE_SUBNETS = Split(',', ImportValue(Sub('${Stage}-PrivateSubnets')))


class APIContribution(AWSCustomObject):
    """Custom resource registering an API contribution with its provisioner.

    ``props`` maps property name -> (expected type, required?) per the
    troposphere custom-object convention.
    """
    resource_type = 'Custom::ApiContribution'

    props = dict(
        ServiceToken=(str, True),
        LambdaProxyArn=(str, True),
        S3Bucket=(str, False),
        S3Key=(str, False),
        SwaggerDefinition=(dict, False),
        RestApiId=(str, False),
    )
Action=[AssumeRole], Principal=Principal("Service", ["ecs.amazonaws.com"]) ) ] ), Path="/", ManagedPolicyArns=[ 'arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceRole'] )) t.add_resource(ecs.Service( "service", Cluster=ImportValue( Join( "-", [Select(0, Split("-", Ref("AWS::StackName"))), "cluster-id"] ) ), DesiredCount=1, TaskDefinition=Ref("task"), LoadBalancers=[ecs.LoadBalancer( ContainerName="helloworld", ContainerPort=3000, TargetGroupArn=ImportValue( Join( "-", [Select(0, Split("-", Ref("AWS::StackName"))), "alb-target-group"] ), ),
SSEAlgorithm='AES256')) ], [ServerSideEncryptionRule()])), VersioningConfiguration=VersioningConfiguration(Status="Enabled"), DeletionPolicy="Retain", CorsConfiguration=CorsConfiguration( CorsRules=[ CorsRules( AllowedOrigins=Split( ";", Join( "", [ "https://", domain_name, If( no_alt_domains, # if we don't have any alternate domains, return an empty string "", # otherwise, return the ';https://' that will be needed by the first domain ";https://", ), # then, add all the alternate domains, joined together with ';https://' Join(";https://", domain_name_alternates), # now that we have a string of origins separated by ';', Split() is used to make it into a list again ])), AllowedMethods=[ "POST", "PUT", "HEAD", "GET", ], AllowedHeaders=[
commands: - echo "Starting python execution" - python alb-route53-cf-template.py > /tmp/alb-route53-cf.template post_build: commands: - echo "Completed CFN template creation." artifacts: files: /tmp/alb-route53-cf.template discard-paths: yes """ t.add_resource( Project( "CodeBuild", Name=Join("-", [Select(0, Split("-", Ref("AWS::StackName"))), "codebuild"]), Environment=environment, ServiceRole=Ref("ServiceRole"), Source=Source(Type="CODEPIPELINE", BuildSpec=buildspec), Artifacts=Artifacts(Type="CODEPIPELINE", Name="output"), )) #### CodePipeline #### t.add_resource( Bucket("S3Bucket", VersioningConfiguration=VersioningConfiguration( Status="Enabled", ))) t.add_resource( Role("PipelineRole", AssumeRolePolicyDocument=Policy(Statement=[
Default="latest", Description="Tag to deploy")) # c5b1dc0a50a8c10a18750dc7f6246e9d1c6aa568 # First, we define an ECS task t.add_resource( TaskDefinition( "task", ContainerDefinitions=[ ContainerDefinition( Image=Join("", [ Ref("AWS::AccountId"), ".dkr.ecr.", Ref("AWS::Region"), ".amazonaws.com", "/", Select(1, Split("-", Ref("AWS::StackName"))), ":", Ref("Tag") ]), Memory=TaskMemory, Cpu=TaskCPU, Name=Select(1, Split("-", Ref("AWS::StackName"))), PortMappings=[ecs.PortMapping(ContainerPort=3000)]) ], )) # Then a service t.add_resource( Role( "ServiceRole", AssumeRolePolicyDocument=Policy(Statement=[