def build(self, t):
    evt = t.add_resource(
        events.Rule('{}Event'.format(self.name), Targets=[]))

    if self.schedule is not None:
        evt.ScheduleExpression = self.schedule

    if self.enabled:
        evt.State = 'ENABLED'
    else:
        evt.State = 'DISABLED'

    for val in self.targets:
        tg = val[0]
        r = val[1]
        role_ref = ensure_param(t, r.output_role_arn())  # noqa
        target = events.Target()

        if isinstance(tg, awslambda.LambdaStack):
            func_name_ref = ensure_param(t, tg.output_func_name())
            func_arn_ref = ensure_param(t, tg.output_func_arn())
            target.Id = tg.get_stack_name()
            target.Arn = Ref(func_arn_ref)

            # allow the events service to invoke the target function
            t.add_resource(
                tlambda.Permission(
                    '{}EventPerm'.format(tg.get_stack_name()),
                    Action='lambda:InvokeFunction',
                    Principal='events.amazonaws.com',
                    FunctionName=Ref(func_name_ref),
                    SourceArn=GetAtt(evt, 'Arn')))

        evt.Targets.append(target)
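# `ensure_param` is used by the builders in this listing but its definition is
# not shown here. The sketch below is an assumption of its behaviour (add a
# Parameter only if one with that name is not already on the template, then
# return it); the real helper in the project's utils module may differ.
from troposphere import Parameter


def ensure_param(t, name, param_type='String'):
    """Return the template Parameter `name`, adding it if it is missing."""
    if name in t.parameters:
        return t.parameters[name]
    return t.add_parameter(Parameter(name, Type=param_type))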
def add_to_template(self, t):
    """Return an alias A record pointing at the ELB's DNS name."""
    zone_param = ensure_param(t, self.value.output_hosted_zone(), 'String')
    dns_param = ensure_param(t, self.value.output_dns_name(), 'String')

    r = route53.RecordSet(
        '{}ELBRecord'.format(self._safe_dns_name(self.name)),
        Name="{}{}.".format(self.name, self.stack.domain_name),
        Type="A",
        AliasTarget=route53.AliasTarget(
            HostedZoneId=Ref(zone_param),
            DNSName=Ref(dns_param)))

    return r
def add_to_template(self, t):
    dns_param = ensure_param(t, self.value.output_dns(), 'String')

    # Z2FDTNDATAQYW2 is the fixed hosted zone ID AWS assigns to every
    # CloudFront distribution for alias records.
    zone_id = "Z2FDTNDATAQYW2"

    r = route53.RecordSet(
        '{}CFRecord'.format(self._safe_dns_name(self.name)),
        Name="{}{}.".format(self.name, self.stack.domain_name),
        Type="A",
        AliasTarget=route53.AliasTarget(
            HostedZoneId=zone_id,
            DNSName=Ref(dns_param)))

    return r
def add_to_template(self, t):
    dns_param = ensure_param(t, self.value.output_endpoint(), 'String')

    r = route53.RecordSet(
        '{}RDSRecord'.format(self._safe_dns_name(self.name)),
        Name="{}{}.".format(self.name, self.stack.domain_name),
        Type="CNAME",
        TTL=self.ttl,
        ResourceRecords=[Ref(dns_param)])

    return r
def add_to_template(self, template):
    param = Ref(ensure_param(template, self.value.output_eip()))

    record = route53.RecordSet(
        '{}EipARecord'.format(self._safe_dns_name(self.name)),
        Name="{}{}.".format(self.name, self.stack.domain_name),
        Type=self.type,
        TTL=self.ttl,
        ResourceRecords=[param])

    return record
def build_template(self):
    t = self._init_template()

    task_role = ensure_param(t, self.task_role.output_role_arn())
    exe_role = ensure_param(t, self.execution_role.output_role_arn())

    task = t.add_resource(
        ecs.TaskDefinition(
            '{}TaskDef'.format(self.stack_name),
            TaskRoleArn=Ref(task_role),
            Family=self.stack_name,
            ExecutionRoleArn=Ref(exe_role),
            RequiresCompatibilities=[self.mode],
            NetworkMode=self.network_mode,
            Cpu=self.cpu,
            Memory=self.memory,
            ContainerDefinitions=[]))

    for c in self.containers:
        task.ContainerDefinitions.append(c.build(t))

    t.add_output([Output('TaskArn', Value=Ref(task))])

    return t
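# The loop above appends `c.build(t)` for each configured container, so each
# container object is expected to return a troposphere ContainerDefinition.
# The class below is a hypothetical minimal example for illustration only;
# its name and attributes are assumptions, not part of the project.
from troposphere import ecs


class ExampleContainer(object):

    def __init__(self, name, image, port=80):
        self.name = name
        self.image = image
        self.port = port

    def build(self, t):
        # `t` is accepted so a real implementation could add Parameters
        # (e.g. for the image tag); this sketch does not need it.
        return ecs.ContainerDefinition(
            Name=self.name,
            Image=self.image,
            Essential=True,
            PortMappings=[ecs.PortMapping(ContainerPort=self.port)])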
def build(self, t):
    o = cloudfront.Origin(OriginPath=self.path, Id=self.get_id())

    if isinstance(self.origin, s3.S3Bucket):
        domain_ref = t.add_parameter(
            Parameter(self.origin.output_bucket_url(), Type='String'))
    elif isinstance(self.origin, apigateway.SwaggerApiStack):
        domain_ref = ensure_param(t, self.origin.output_url())
    elif isinstance(self.origin, elb.ELBStack):
        domain_ref = ensure_param(t, self.origin.output_dns_name())
    else:
        domain_ref = t.add_parameter(
            Parameter('Input{}Origin'.format(self.name), Type='String'))

    co = cloudfront.CustomOriginConfig(
        OriginReadTimeout=self.origin_timeout,
        OriginProtocolPolicy=self.origin_proto,
        OriginSSLProtocols=self.ssl_protocols)

    o.CustomOriginConfig = co
    o.DomainName = Ref(domain_ref)

    return o
def build_template(self):
    t = self._init_template()

    zone = t.add_resource(
        route53.HostedZone(
            "{}HostedZone".format(self.name),
            Name=self.domain_name))

    if self.vpc:
        # private hosted zone: associate it with the VPC in the current region
        vpc_param = ensure_param(t, self.vpc.output_vpc())
        zone.VPCs = [
            route53.HostedZoneVPCs(
                VPCId=Ref(vpc_param),
                VPCRegion=Ref("AWS::Region"))
        ]

    group = t.add_resource(
        route53.RecordSetGroup(
            "{}RecordGroup".format(self.name),
            HostedZoneId=Ref(zone),
            DependsOn=zone,
            RecordSets=[rs.add_to_template(t) for rs in self.records]))

    return t
def build(self, t, app):
    role_param = ensure_param(t, self.role.output_role_arn())

    group = t.add_resource(
        cdeploy.DeploymentGroup(
            '{}Group'.format(self.name),
            ServiceRoleArn=Ref(role_param),
            ApplicationName=Ref(app),
            DeploymentConfigName=self.strategy,
            # DeploymentStyle=cdeploy.DeploymentStyle(
            #     DeploymentType='IN_PLACE',
            #     DeploymentOption='WITH_TRAFFIC_CONTROL'
            # ),
            Ec2TagFilters=[],  # appended to for EC2 targets below
            AutoScalingGroups=[]))

    for target in self.targets:
        if isinstance(target, ec2.EC2Stack):
            ec2_tag = t.add_parameter(
                Parameter(target.output_tag_name(), Type='String'))
            group.Ec2TagFilters.append(
                cdeploy.Ec2TagFilters(
                    Key='Name',
                    Value=Ref(ec2_tag),
                    Type='KEY_AND_VALUE'))

        if isinstance(target, asg.ASGStack):
            asg_param = t.add_parameter(
                Parameter(target.output_asg(), Type='String'))
            group.AutoScalingGroups.append(Ref(asg_param))

    t.add_output([
        Output('{}Group'.format(self.name), Value=Ref(group)),
    ])
def build_template(self):
    deploy_alias = False
    t = self._init_template()

    self.jinja_env_vars()

    role_param = t.add_parameter(
        Parameter(self.role.output_role_arn(), Type='String'))
    bucket_ref = t.add_parameter(
        Parameter(self.s3_bucket.output_bucket_name(), Type='String'))

    if self._deploying:
        if not self.uploaded and self._bucket_name():
            deploy_alias = True
            self._determine_code_versions()
            logger.info("S3 Key: {}".format(self.zip_name))

    func = t.add_resource(
        awslambda.Function(
            '{}Function'.format(self.get_stack_name()),
            FunctionName=self.get_stack_name(),
            Handler=self.handler,
            MemorySize=self.memory,
            Timeout=self.timeout,
            Runtime=self.runtime,
            Role=Ref(role_param),
            Environment=awslambda.Environment(Variables=self.vars),
            Code=awslambda.Code(
                S3Bucket=Ref(bucket_ref),
                S3Key=self.zip_name)))

    if self.s3_version:
        func.Code.S3ObjectVersion = self.s3_version

    # vpc mode
    if self.vpc_stack is not None:
        if self.public_subnet:
            subnets = self.vpc_stack.output_public_subnets()
        else:
            subnets = self.vpc_stack.output_private_subnets()

        subnet_refs = [
            Ref(utils.ensure_param(t, val, 'String'))
            for val in subnets
        ]

        func.VpcConfig = awslambda.VPCConfig(
            SubnetIds=subnet_refs,
            SecurityGroupIds=[])

        for sg in self.security_groups:
            sg_ref = Ref(
                utils.ensure_param(t, sg.output_security_group(), 'String'))
            func.VpcConfig.SecurityGroupIds.append(sg_ref)

    if deploy_alias is True:
        for v in self.aliases:
            t.add_resource(
                awslambda.Alias(
                    '{}Alias'.format(v['name']),
                    FunctionName=Ref(func),
                    Name=v['name'],
                    FunctionVersion=v['version']))

    if len(self.event_sources) > 0:
        for s in self.event_sources:
            src = s['src']
            args = s['args']

            if isinstance(src, dynamodb.DynamoTable):
                p = t.add_parameter(
                    Parameter(src.output_stream(), Type='String'))
                t.add_resource(
                    awslambda.EventSourceMapping(
                        'LambdaDynamo{}'.format(src.name),
                        FunctionName=Ref(func),
                        EventSourceArn=Ref(p),
                        StartingPosition='LATEST'))

            if isinstance(src, sqs.Queue):
                p = t.add_parameter(
                    Parameter(src.output_queue_arn(), Type='String'))
                t.add_resource(
                    awslambda.EventSourceMapping(
                        'LambdaSQS{}'.format(src.name),
                        FunctionName=Ref(func),
                        EventSourceArn=Ref(p),
                        BatchSize=args.get('BatchSize', 1)))

    for k, v in enumerate(self.perms):
        v.build(t, func, k)

    t.add_output([
        Output('FunctionName', Value=Ref(func)),
        Output('FunctionArn', Value=GetAtt(func, "Arn"))
    ])

    return t
def build_template(self):
    t = self._init_template()

    # ami input + param
    self.infra.add_var(
        'Input{}EC2Ami'.format(self.stack_name),
        self.ami
    )
    ami_param = t.add_parameter(Parameter(
        'Input{}EC2Ami'.format(self.stack_name),
        Type='String'
    ))

    # tag Name
    tag_name = t.add_parameter(Parameter(
        "Input{}EC2TagName".format(self.stack_name),
        Type="String",
        Default='{}EC2'.format(self.stack_name),
        Description="Tag name for {} EC2 Stack".format(self.stack_name)
    ))

    # instance type
    instance_type = t.add_parameter(Parameter(
        "Input{}EC2InstanceType".format(self.stack_name),
        Type="String",
        Description="Instance Type for {} EC2 Stack".format(self.stack_name),
        Default="t2.micro"
    ))

    # root file size
    root_device_size = t.add_parameter(Parameter(
        "Input{}EC2RootDeviceSize".format(self.stack_name),
        Type="String",
        Default="20",
        Description="{} Root Device File Size".format(self.stack_name)
    ))

    # root device name
    root_device_name = t.add_parameter(Parameter(
        "Input{}EC2RootDeviceName".format(self.stack_name),
        Type="String",
        Default="/dev/xvda",
        Description="{} Root Device Name".format(self.stack_name)
    ))

    # root device type
    root_device_type = t.add_parameter(Parameter(
        "Input{}EC2RootDeviceType".format(self.stack_name),
        Type="String",
        Default="gp2",
        Description="{} Root Device Type".format(self.stack_name)
    ))

    # instance profile
    instance_profile_param = t.add_parameter(Parameter(
        self.iam_profile.output_instance_profile(),
        Type='String'
    ))

    # user data params
    user_data = []
    for i in range(0, 4):
        user_data.append(
            Ref(t.add_parameter(Parameter(
                '{}UserData{}'.format(self.stack_name, i),
                Type='String',
                Default=' ',
                Description='{} UserData #{}'.format(self.stack_name, i)
            )))
        )

    # subnet
    if self.private_subnet:
        subnet = self.vpc.output_private_subnets()[0]
    else:
        self.network_interfaces[0].AssociatePublicIpAddress = True
        subnet = self.vpc.output_public_subnets()[0]

    subnet_param = t.add_parameter(Parameter(
        subnet,
        Type='String',
        Description='Subnet for ec2 {}'.format(self.stack_name)
    ))

    self.network_interfaces[0].SubnetId = Ref(subnet_param)

    for sg in self.security_groups:
        sg_param = t.add_parameter(Parameter(
            sg.output_security_group(),
            Type='String'
        ))
        self.network_interfaces[0].GroupSet.append(Ref(sg_param))

    volumes = []
    for volume in self.volumes:
        device_name = t.add_parameter(Parameter(
            'Input{}EBSDeviceName'.format(volume.name),
            Type='String'
        ))
        volume_id = t.add_parameter(Parameter(
            volume.output_volume(),
            Type="String"
        ))
        volumes.append(ec2.MountPoint(
            VolumeId=Ref(volume_id),
            Device=Ref(device_name)
        ))

    instance = t.add_resource(ec2.Instance(
        '{}EC2Instance'.format(self.stack_name),
        Tags=Tags(
            Name=Ref(tag_name)
        ),
        ImageId=Ref(ami_param),
        Volumes=volumes,
        InstanceType=Ref(instance_type),
        IamInstanceProfile=Ref(instance_profile_param),
        NetworkInterfaces=self.network_interfaces,
        BlockDeviceMappings=[
            ec2.BlockDeviceMapping(
                DeviceName=Ref(root_device_name),
                Ebs=ec2.EBSBlockDevice(
                    VolumeSize=Ref(root_device_size),
                    VolumeType=Ref(root_device_type),
                    DeleteOnTermination=True
                )
            )
        ],
        UserData=Base64(
            Join('', [
                "#!/bin/bash\n",
                "exec > >(tee /var/log/user-data.log|logger ",
                "-t user-data -s 2>/dev/console) 2>&1\n",
            ] + user_data)
        )
    ))

    if self.use_key:
        instance.KeyName = self.use_key

    if self.eip is not None:
        eip_ref = ensure_param(t, self.eip.output_allocation_id())
        t.add_resource(ec2.EIPAssociation(
            '{}EIPAssoc'.format(self.stack_name),
            InstanceId=Ref(instance),
            AllocationId=Ref(eip_ref)
        ))

    t.add_output([
        Output(
            '{}EC2Instance'.format(self.stack_name),
            Value=Ref(instance)
        ),
        Output(
            "{}TagName".format(self.stack_name),
            Value=Ref(tag_name)
        )
    ])

    return t