def test_troposphere_type_create_multiple(self):
    """A many=True TroposphereType builds a list from a multi-key dict."""
    bucket_type = TroposphereType(s3.Bucket, many=True)
    definitions = {
        "FirstBucket": {"BucketName": "test-bucket"},
        "SecondBucket": {"BucketName": "other-test-bucket"},
    }
    result = bucket_type.create(definitions)
    self.assertTrue(isinstance(result, list))
def test_troposphere_type_create(self):
    """A single (many=False) TroposphereType builds one titled resource.

    Bug fix: the property check previously used
    ``assertTrue(created.properties[...], "test-bucket")`` -- the second
    argument of assertTrue is a *message*, so the assertion passed for
    any truthy value.  It now asserts equality.
    """
    troposphere_type = TroposphereType(s3.Bucket)
    created = troposphere_type.create(
        {"MyBucket": {"BucketName": "test-bucket"}})
    self.assertTrue(isinstance(created, s3.Bucket))
    self.assertEqual(created.properties["BucketName"], "test-bucket")
class FlexibleAutoScalingGroup(Blueprint):
    """A more flexible AutoscalingGroup Blueprint.

    Both the LaunchConfiguration and the AutoScalingGroup shapes are
    supplied by the user through TroposphereType variables; the
    blueprint only wires the generated LaunchConfiguration into the
    group.  More flexible than a hard-coded ASG blueprint, at the price
    of doing less for you.
    """

    VARIABLES = {
        "LaunchConfiguration": {
            "type": TroposphereType(autoscaling.LaunchConfiguration),
            "description": "The LaunchConfiguration for the autoscaling "
                           "group.",
        },
        "AutoScalingGroup": {
            "type": TroposphereType(autoscaling.AutoScalingGroup),
            "description": "The Autoscaling definition. Do not provide a "
                           "LaunchConfiguration parameter, that will be "
                           "automatically added from the LaunchConfiguration "
                           "Variable.",
        },
    }

    def create_launch_configuration(self):
        """Add the user-supplied LaunchConfiguration and output its ref."""
        template = self.template
        config = self.get_variables()["LaunchConfiguration"]
        self.launch_config = template.add_resource(config)
        template.add_output(
            Output("LaunchConfiguration", Value=self.launch_config.Ref()))

    def add_launch_config_variable(self, asg):
        """Wire the generated LaunchConfiguration into *asg*.

        Raises ValueError when the user supplied their own
        LaunchConfigurationName, since it would be silently overridden.
        """
        if getattr(asg, "LaunchConfigurationName", False):
            raise ValueError("Do not provide a LaunchConfigurationName "
                             "variable for the AutoScalingGroup config.")

        asg.LaunchConfigurationName = self.launch_config.Ref()
        return asg

    def create_autoscaling_group(self):
        """Add the AutoScalingGroup (with launch config wired in)."""
        template = self.template
        group = self.add_launch_config_variable(
            self.get_variables()["AutoScalingGroup"])
        template.add_resource(group)
        template.add_output(Output("AutoScalingGroup", Value=group.Ref()))

    def create_template(self):
        # Launch configuration must exist before the ASG references it.
        self.create_launch_configuration()
        self.create_autoscaling_group()
class AccessPoints(Blueprint):
    """Creates EFS access points plus Id/Arn outputs for each one."""

    VARIABLES = {
        'AccessPoints': {
            'type': TroposphereType(_AccessPoint, many=True),
            'description': 'A dictionary of the AccessPoints to create. The '
                           'key being the CFN logical resource name, the '
                           'value being a dictionary of attributes for '
                           'the troposphere efs.FileSystem type.',
        },
        'Tags': {
            'type': dict,
            'description': 'Tags to associate with the created resources',
            'default': {}
        }
    }

    def create_template(self):
        template = self.template
        variables = self.get_variables()

        tags = variables.get('Tags')
        for access_point in variables.get('AccessPoints'):
            # This is a major hack to inject extra tags in resources
            access_point.AccessPointTags = merge_tags(
                tags, getattr(access_point, 'Tags', {}))
            logical_name = access_point.name
            resource = template.add_resource(access_point)
            template.add_output(Output(
                '{}Id'.format(logical_name),
                Value=GetAtt(resource, 'AccessPointId')))
            template.add_output(Output(
                '{}Arn'.format(logical_name),
                Value=GetAtt(resource, 'Arn')))
class FunctionScheduler(Blueprint):
    """Schedules Lambda invocations via a Cloudwatch Events Rule."""

    VARIABLES = {
        "CloudwatchEventsRule": {
            "type": TroposphereType(events.Rule),
            "description": "The troposphere.events.Rule object params.",
        },
    }

    def create_scheduler(self):
        """Add the events rule and invoke-permissions for lambda targets."""
        events_rule = self.get_variables()["CloudwatchEventsRule"]

        # Gather the ARNs of any lambda targets so events.amazonaws.com
        # can be granted permission to invoke each of them.
        lambda_arns = {}
        for target in getattr(events_rule, "Targets", []):
            if target.Arn.startswith("arn:aws:lambda:"):
                lambda_arns[cf_safe_name(target.Id)] = target.Arn

        # Schedule the Cloudwatch event rule to invoke the targets.
        rule = self.template.add_resource(events_rule)

        for target_id, lambda_arn in lambda_arns.items():
            self.template.add_resource(
                awslambda.Permission(
                    "PermToInvokeFunctionFor{}".format(target_id),
                    Principal="events.amazonaws.com",
                    Action="lambda:InvokeFunction",
                    FunctionName=lambda_arn,
                    SourceArn=rule.GetAtt("Arn")))

    def create_template(self):
        self.create_scheduler()
def _resolve_troposphere_var(self, tpe, value, **kwargs):
    """Resolve *value* through a TroposphereType(tpe, **kwargs) variable."""
    var_name = "testVar"
    definition = {"type": TroposphereType(tpe, **kwargs)}
    variable = Variable(var_name, value)
    return resolve_variable(var_name, definition, variable, "testBlueprint")
def test_resolve_variable_troposphere_type(self):
    """A TroposphereType variable resolves to a titled troposphere object."""
    var_name = "testVar"
    definition = {"type": TroposphereType(s3.Bucket)}
    variable = Variable(var_name,
                        {"MyBucket": {"BucketName": "some-bucket"}})
    resolved = resolve_variable(var_name, definition, variable,
                                "testBlueprint")
    self.assertTrue(isinstance(resolved, s3.Bucket))
    self.assertEqual(resolved.properties["BucketName"], "some-bucket")
    self.assertEqual(resolved.title, "MyBucket")
def test_resolve_variable_troposphere_list_type(self):
    """A many=True TroposphereType variable resolves to a list of objects."""
    var_name = "testVar"
    definition = {"type": TroposphereType(s3.Bucket, many=True)}
    bucket_defs = {
        "FirstBucket": {"BucketName": "some-bucket"},
        "SecondBucket": {"BucketName": "some-other-bucket"},
    }
    variable = Variable(var_name, bucket_defs)
    resolved = resolve_variable(var_name, definition, variable,
                                "testBlueprint")
    # Every entry keeps its title and carries the matching properties.
    for bucket in resolved:
        self.assertTrue(isinstance(bucket, s3.Bucket))
        self.assertEqual(bucket.properties, bucket_defs[bucket.title])
class Instances(Blueprint):
    """Manages the creation of EC2 Instance resources."""

    VARIABLES = {
        "Instances": {
            "type": TroposphereType(ec2.Instance, many=True),
            "description": "Dictionary of EC2 Instance definitions.",
        },
    }

    def has_public_ip(self, instance):
        """Return True when the primary interface requests a public IP.

        Only the DeviceIndex 0 interface is consulted; its
        AssociatePublicIpAddress must be the string "true".
        """
        for interface in instance.properties.get("NetworkInterfaces", []):
            if int(interface.properties["DeviceIndex"]) == 0:
                return interface.properties.get(
                    "AssociatePublicIpAddress") == "true"
        return False

    def create_template(self):
        template = self.template

        for instance in self.get_variables()["Instances"]:
            template.add_resource(instance)
            title = instance.title
            template.add_output(
                Output(title + "InstanceId", Value=instance.Ref()))
            template.add_output(
                Output(title + "AZ",
                       Value=instance.GetAtt("AvailabilityZone")))
            template.add_output(
                Output(title + "PrivateDnsName",
                       Value=instance.GetAtt("PrivateDnsName")))
            template.add_output(
                Output(title + "PrivateIp",
                       Value=instance.GetAtt("PrivateIp")))
            # Public attributes only exist for instances that asked for
            # a public address on their primary interface.
            if self.has_public_ip(instance):
                template.add_output(
                    Output(title + "PublicIp",
                           Value=instance.GetAtt("PublicIp")))
                template.add_output(
                    Output(title + "PublicDnsName",
                           Value=instance.GetAtt("PublicDnsName")))
class DynamoDB(Blueprint):
    """Manages the creation of DynamoDB tables.

    Example::

      - name: users
        class_path: stacker_blueprints.dynamodb.DynamoDB
        variables:
          Tables:
            UserTable:
              TableName: prod-user-table
              KeySchema:
                - AttributeName: id
                  KeyType: HASH
                - AttributeName: name
                  KeyType: RANGE
              AttributeDefinitions:
                - AttributeName: id
                  AttributeType: S
                - AttributeName: name
                  AttributeType: S
              ProvisionedThroughput:
                ReadCapacityUnits: 5
                WriteCapacityUnits: 5
              StreamSpecification:
                StreamViewType: ALL
    """

    VARIABLES = {
        "Tables": {
            "type": TroposphereType(dynamodb.Table, many=True),
            "description": "DynamoDB tables to create.",
        }
    }

    def create_template(self):
        template = self.template

        for table in self.get_variables()["Tables"]:
            template.add_resource(table)
            # Only tables configured with a StreamSpecification get a
            # StreamArn output.
            if table.properties.get("StreamSpecification"):
                template.add_output(
                    Output("{}StreamArn".format(table.title),
                           Value=GetAtt(table, "StreamArn")))
            template.add_output(
                Output("{}Name".format(table.title), Value=Ref(table)))
class SubscriptionFilters(Blueprint):
    """Creates Cloudwatch Logs subscription filters with Name outputs."""

    VARIABLES = {
        "SubscriptionFilters": {
            "type": TroposphereType(logs.SubscriptionFilter, many=True),
            "description": "Subscription filters to create.",
        }
    }

    def create_template(self):
        template = self.template

        for sub_filter in self.get_variables()["SubscriptionFilters"]:
            template.add_resource(sub_filter)
            template.add_output(
                Output("%sName" % sub_filter.title, Value=Ref(sub_filter)))
class Certificates(Blueprint):
    """Creates ACM certificates plus Id/Arn outputs for each one."""

    VARIABLES = {
        "Certificates": {
            "type": TroposphereType(acm.Certificate, many=True),
            "description": "ACM Certificate configurations.",
        },
    }

    def create_template(self):
        template = self.template

        for certificate in self.get_variables()["Certificates"]:
            template.add_resource(certificate)
            # NOTE(review): both outputs publish Ref(); for ACM
            # certificates Ref appears to resolve to the ARN, so the Id
            # and Arn outputs carry the same value -- confirm.
            self.add_output("%sId" % certificate.title, certificate.Ref())
            self.add_output("%sArn" % certificate.title, certificate.Ref())
class Queues(Blueprint):
    """Manages the creation of SQS queues."""

    VARIABLES = {
        "Queues": {
            "type": TroposphereType(sqs.Queue, many=True),
            "description": "Dictionary of SQS queue definitions",
        },
    }

    def create_template(self):
        template = self.template

        for queue in self.get_variables()["Queues"]:
            template.add_resource(queue)
            # Ref on an SQS queue gives the URL; Arn comes via GetAtt.
            template.add_output(
                Output(queue.title + "Arn", Value=GetAtt(queue, "Arn")))
            template.add_output(
                Output(queue.title + "Url", Value=Ref(queue)))
class SecurityGroups(Blueprint):
    """Creates EC2 security groups and an Id output per group."""

    VARIABLES = {
        "SecurityGroups": {
            "type": TroposphereType(ec2.SecurityGroup, many=True),
            "description": "Configuration for multiple security groups.",
        }
    }

    def create_template(self):
        template = self.template

        for group in self.get_variables()["SecurityGroups"]:
            template.add_resource(group)
            template.add_output(
                Output(group.title + "Id",
                       Value=group.GetAtt("GroupId")))
class Cluster(Blueprint):
    """Creates MSK clusters and exposes an Arn output for each one."""

    VARIABLES = {
        'Clusters': {
            'type': TroposphereType(_Cluster, many=True),
            # User-facing description; fixed the "dictinary" misspelling.
            'description': 'A dictionary where key is the resource name and '
                           'the value is a MSK cluster as defined by '
                           'Cloudformation'
        }
    }

    def create_template(self):
        t = self.template
        v = self.get_variables()

        for cluster in v.get('Clusters'):
            t.add_resource(cluster)
            t.add_output(Output(cluster.title + 'Arn', Value=Ref(cluster)))
class Instances(Blueprint):
    """Manages the creation of EC2 Instance resources."""

    VARIABLES = {
        "Instances": {
            "type": TroposphereType(ec2.Instance, many=True),
            "description": "Dictionary of EC2 Instance definitions.",
        },
    }

    def create_template(self):
        template = self.template

        for instance in self.get_variables()["Instances"]:
            template.add_resource(instance)
            title = instance.title
            template.add_output(
                Output(title + "InstanceId", Value=instance.Ref()))
            # Export the commonly consumed attributes, unconditionally.
            for suffix, attribute in (
                    ("AZ", "AvailabilityZone"),
                    ("PrivateDnsName", "PrivateDnsName"),
                    ("PublicDnsName", "PublicDnsName"),
                    ("PrivateIp", "PrivateIp"),
                    ("PublicIp", "PublicIp")):
                template.add_output(
                    Output(title + suffix,
                           Value=instance.GetAtt(attribute)))
class ElasticFileSystem(Blueprint):
    """Creates an EFS filesystem plus one mount target per subnet.

    Security groups for the mount targets may be created here
    (``SecurityGroups``) and/or supplied as existing ids
    (``ExtraSecurityGroups``); between one and five total are required.
    """

    VARIABLES = {
        'VpcId': {
            'type': str,
            'description': 'VPC ID to create resources'
        },
        'PerformanceMode': {
            'type': str,
            'description': 'The performance mode of the file system',
            'default': 'generalPurpose'
        },
        'Tags': {
            'type': dict,
            'description': 'Tags to associate with the created resources',
            'default': {}
        },
        'Subnets': {
            'type': list,
            'description': 'List of subnets to deploy private mount targets in'
        },
        'IpAddresses': {
            'type': list,
            'description': 'List of IP addresses to assign to mount targets. '
                           'Omit or make empty to assign automatically. '
                           'Corresponds to Subnets listed in the same order.',
            'default': []
        },
        'SecurityGroups': {
            'type': TroposphereType(ec2.SecurityGroup, many=True,
                                    optional=True, validate=False),
            'description': "Dictionary of titles to SecurityGroups "
                           "definitions to be created and assigned to this "
                           "filesystem's MountTargets. "
                           "The VpcId property will be filled automatically, "
                           "so it should not be included. \n"
                           "The IDs of the created groups will be exported as "
                           "a comma-separated list in the "
                           "EfsNewSecurityGroupIds output.\n"
                           "Omit this parameter or set it to an empty "
                           "dictionary to not create any groups. In that "
                           "case the ExistingSecurityGroups variable must not "
                           "be empty",
            'default': {}
        },
        'ExtraSecurityGroups': {
            'type': list,
            'description': "List of existing SecurityGroup IDs to be asigned "
                           "to this filesystem's MountTargets",
            'default': []
        }
    }

    def validate_efs_security_groups(self):
        """Require between one and five total security groups."""
        validator = '{}.{}'.format(
            type(self).__name__, 'validate_efs_security_groups')
        v = self.get_variables()
        # SecurityGroups is optional and may resolve to None.
        count = len(v['SecurityGroups'] or []) + len(v['ExtraSecurityGroups'])

        if count == 0:
            raise ValidatorError(
                'SecurityGroups,ExtraSecurityGroups', validator, count,
                'At least one SecurityGroup must be provided')
        elif count > 5:
            raise ValidatorError(
                'SecurityGroups,ExtraSecurityGroups', validator, count,
                'At most five total SecurityGroups must be provided')

    def validate_efs_subnets(self):
        """Require a subnet list; IpAddresses, if given, must match it."""
        validator = '{}.{}'.format(type(self).__name__, 'validate_efs_subnets')
        v = self.get_variables()

        subnet_count = len(v['Subnets'])
        if not subnet_count:
            raise ValidatorError('Subnets', validator, v['Subnets'],
                                 'At least one Subnet must be provided')

        ip_count = len(v['IpAddresses'])
        if ip_count and ip_count != subnet_count:
            raise ValidatorError(
                'IpAddresses', validator, v['IpAddresses'],
                'The number of IpAddresses must match the number of Subnets')

    def resolve_variables(self, provided_variables):
        super(ElasticFileSystem, self).resolve_variables(provided_variables)

        self.validate_efs_security_groups()
        self.validate_efs_subnets()

    def prepare_efs_security_groups(self):
        """Create requested groups; return all group ids to attach."""
        t = self.template
        v = self.get_variables()

        created_groups = []
        # Bug fix: SecurityGroups is optional=True and may be None; the
        # validators above already guard with `or []`, but this loop did
        # not, so a None value raised TypeError here.
        for sg in v['SecurityGroups'] or []:
            sg.VpcId = v['VpcId']
            sg.Tags = merge_tags(v['Tags'], getattr(sg, 'Tags', {}))
            sg = t.add_resource(sg)
            created_groups.append(sg)

        created_group_ids = list(map(Ref, created_groups))
        t.add_output(
            Output('EfsNewSecurityGroupIds',
                   Value=Join(',', created_group_ids)))

        groups_ids = created_group_ids + v['ExtraSecurityGroups']
        return groups_ids

    def create_efs_filesystem(self):
        """Add the FileSystem resource and output its id."""
        t = self.template
        v = self.get_variables()

        fs = t.add_resource(
            efs.FileSystem('EfsFileSystem',
                           FileSystemTags=Tags(v['Tags']),
                           PerformanceMode=v['PerformanceMode']))
        t.add_output(Output('EfsFileSystemId', Value=Ref(fs)))

        return fs

    def create_efs_mount_targets(self, fs):
        """Add one MountTarget per subnet, optionally with fixed IPs."""
        t = self.template
        v = self.get_variables()

        groups = self.prepare_efs_security_groups()
        subnets = v['Subnets']
        ips = v['IpAddresses']

        mount_targets = []
        for i, subnet in enumerate(subnets):
            mount_target = efs.MountTarget('EfsMountTarget{}'.format(i + 1),
                                           FileSystemId=Ref(fs),
                                           SubnetId=subnet,
                                           SecurityGroups=groups)
            # ips, when given, is index-aligned with subnets (validated).
            if ips:
                mount_target.IpAddress = ips[i]
            mount_target = t.add_resource(mount_target)
            mount_targets.append(mount_target)

        t.add_output(
            Output('EfsMountTargetIds',
                   Value=Join(',', list(map(Ref, mount_targets)))))

    def create_template(self):
        fs = self.create_efs_filesystem()
        self.create_efs_mount_targets(fs)
def defined_variables(self):
    """Return this blueprint's variable definitions.

    Extends the parent class's definitions with the ECS-app-specific
    variables below; on a key collision the entries here win (dict
    ``update`` semantics).
    """
    variables = super(BaseECSApp, self).defined_variables()

    extra_vars = {
        "AppName": {
            "type": str,
            "description": "A simple name for the application.",
        },
        "Cluster": {
            "type": str,
            "description": "The name or Amazon Resource Name (ARN) of the "
                           "ECS cluster that you want to run your tasks "
                           "on.",
        },
        "Count": {
            "type": int,
            "description": "The number of instances of the task to "
                           "create.",
            "default": 1,
        },
        "DeploymentConfiguration": {
            # optional=True: resolves to None when not supplied.
            "type": TroposphereType(ecs.DeploymentConfiguration,
                                    optional=True),
            "description": "An optional DeploymentConfiguration object.",
            "default": None,
        },
        "PlacementConstraints": {
            "type": TroposphereType(
                ecs.PlacementConstraint,
                optional=True,
                many=True,
            ),
            "description": "An optional list of PlacementConstraint "
                           "objects.",
            "default": None,
        },
        "LoadBalancerTargetGroupArns": {
            "type": list,
            "description": "A list of load balancer target group arns "
                           "to attach to the container. Requires that "
                           "the ContainerPort be set.",
            "default": [],
        },
        "HealthCheckGracePeriodSeconds": {
            "type": int,
            "description": "An optional grace period for load balancer "
                           "health checks against the service when it "
                           "starts up.",
            "default": 0,
        },
    }

    variables.update(extra_vars)
    return variables
class BaseECSTask(Blueprint):
    """Base blueprint for a single-container ECS task definition.

    Builds an ``ecs.TaskDefinition`` with one container, and (unless an
    external ``TaskRoleArn`` is supplied) an IAM role plus optional
    managed policy for the task.  Subclasses customize behavior by
    overriding the properties and ``generate_policy_document``.
    """

    VARIABLES = {
        "TaskName": {
            "type": str,
            "description": "A name for the task/process.",
        },
        "Image": {
            "type": str,
            "description": "The docker image to use for the task.",
        },
        "Command": {
            "type": list,
            "description": "A list of the command and it's arguments to run "
                           "inside the container. If not provided, will "
                           "default to the default command defined in the "
                           "image.",
            "default": [],
        },
        "CPU": {
            "type": int,
            "description": "The relative CPU shares used by each instance of "
                           "the task.",
        },
        "Memory": {
            "type": int,
            "description": "The amount of memory (in megabytes) to reserve "
                           "for each instance of the task.",
        },
        "NetworkMode": {
            "type": str,
            "description": "The NetworkMode to use in the task definition.",
            "default": "",
        },
        "Environment": {
            "type": dict,
            "description": "A dictionary representing the environment of the "
                           "task.",
            "default": {},
        },
        "LogConfiguration": {
            # optional=True: resolves to None when not supplied, which
            # triggers the awslogs default in the property below.
            "type": TroposphereType(ecs.LogConfiguration, optional=True),
            "description": "An optional log configuration object. If one is "
                           "not provided, the default is to send logs into "
                           "a Cloudwatch Log LogGroup named after the "
                           "ServiceName",
            "default": None,
        },
        "TaskRoleArn": {
            "type": str,
            "description": "An optional role to run the task as.",
            "default": "",
        },
        "ContainerPort": {
            "type": int,
            "description": "The port of the container to expose to the "
                           "network. Defaults to not exposing any ports.",
            "default": 0,
        },
        "HostPort": {
            "type": int,
            "description": "The host port to bind to the container port, if "
                           "ContainerPort is specified. If not, does "
                           "nothing. If HostPort is not specified, a dynamic "
                           "port mapping will be used.",
            "default": 0,
        },
        "ContainerProtocol": {
            "type": str,
            "description": "If set, must be either tcp or udp. Requires that "
                           "ContainerPort is set as well. Default: tcp",
            "default": "",
        },
    }

    @property
    def task_name(self):
        # The TaskName variable, used as container name and log group.
        return self.get_variables()["TaskName"]

    @property
    def image(self):
        return self.get_variables()["Image"]

    @property
    def command(self):
        # Empty list collapses to NoValue so the image default applies.
        return self.get_variables()["Command"] or NoValue

    @property
    def cpu(self):
        return self.get_variables()["CPU"]

    @property
    def task_definition_cpu(self):
        # Task-level Cpu is omitted here; subclasses may override.
        return NoValue

    @property
    def memory(self):
        return self.get_variables()["Memory"]

    @property
    def task_definition_memory(self):
        # Task-level Memory is omitted here; subclasses may override.
        return NoValue

    @property
    def environment(self):
        """Environment variable dict converted to ecs.Environment objects."""
        env_dict = self.get_variables()["Environment"]
        if not env_dict:
            return NoValue

        env_list = []
        # Sort it first to avoid dict sort issues on different machines
        sorted_env = sorted(env_dict.items(), key=lambda pair: pair[0])
        for k, v in sorted_env:
            env_list.append(ecs.Environment(Name=str(k), Value=str(v)))

        return env_list

    @property
    def log_group_name(self):
        # Default Cloudwatch log group is named after the task.
        return self.task_name

    @property
    def log_configuration(self):
        """User-supplied LogConfiguration, or a default awslogs config."""
        log_config = self.get_variables()["LogConfiguration"]
        if not log_config:
            log_config = ecs.LogConfiguration(
                LogDriver="awslogs",
                Options={
                    "awslogs-group": self.log_group_name,
                    "awslogs-region": Region,
                    "awslogs-stream-prefix": self.task_name,
                })
        return log_config

    @property
    def task_role_arn(self):
        # Empty string means "create a role in this stack instead".
        return self.get_variables()["TaskRoleArn"]

    @property
    def network_mode(self):
        return self.get_variables()["NetworkMode"] or NoValue

    @property
    def container_port(self):
        return self.get_variables()["ContainerPort"]

    @property
    def host_port(self):
        host_port = self.get_variables()["HostPort"]
        if host_port and not self.container_port:
            raise ValueError("Must specify ContainerPort if specifying "
                             "HostPort")
        return host_port

    @property
    def container_protocol(self):
        container_protocol = self.get_variables()["ContainerProtocol"]
        if container_protocol and not self.container_port:
            raise ValueError("Must specify ContainerPort if specifying "
                             "ContainerProtocol")
        return container_protocol

    @property
    def container_port_mappings(self):
        """Optional list with a single PortMapping for ContainerPort."""
        mappings = NoValue
        if self.container_port:
            kwargs = {"ContainerPort": self.container_port}
            if self.host_port:
                kwargs["HostPort"] = self.host_port
            if self.container_protocol:
                kwargs["Protocol"] = self.container_protocol
            mappings = [ecs.PortMapping(**kwargs)]
        return mappings

    @property
    def container_name(self):
        return self.task_name

    def create_task_role(self):
        """Create the task IAM role, unless an external ARN was given."""
        if self.task_role_arn:
            self.add_output("RoleArn", self.task_role_arn)
            return

        t = self.template

        self.task_role = t.add_resource(
            iam.Role(
                "Role",
                AssumeRolePolicyDocument=get_ecs_task_assumerole_policy(),
                Path="/",
            ))

        self.add_output("RoleName", self.task_role.Ref())
        self.add_output("RoleArn", self.task_role.GetAtt("Arn"))
        self.add_output("RoleId", self.task_role.GetAtt("RoleId"))

    def generate_policy_document(self):
        # Hook for subclasses; None means no managed policy is created.
        return None

    def create_task_role_policy(self):
        """Attach a ManagedPolicy to the created role, when applicable."""
        policy_doc = self.generate_policy_document()
        # Skip when using an external role or no policy was generated.
        if self.task_role_arn or not policy_doc:
            return

        t = self.template

        self.task_role_policy = t.add_resource(
            iam.ManagedPolicy(
                "ManagedPolicy",
                PolicyDocument=policy_doc,
                Roles=[self.task_role.Ref()],
            ))

        self.add_output("ManagedPolicyArn", self.task_role_policy.Ref())

    def generate_container_definition_kwargs(self):
        """Build the kwargs for the single ContainerDefinition."""
        kwargs = {
            "Command": self.command,
            "Cpu": self.cpu,
            "Environment": self.environment,
            "Essential": True,
            "Image": self.image,
            "LogConfiguration": self.log_configuration,
            "Memory": self.memory,
            "Name": self.container_name,
            "PortMappings": self.container_port_mappings,
        }

        return kwargs

    def generate_container_definition(self):
        return ecs.ContainerDefinition(
            **self.generate_container_definition_kwargs())

    def generate_task_definition_kwargs(self):
        """Build the kwargs for the TaskDefinition resource."""
        # Prefer the external ARN; otherwise use the role created above
        # (create_task_role must have run first).
        task_role_arn = self.task_role_arn or self.task_role.GetAtt("Arn")
        return {
            "Cpu": self.task_definition_cpu,
            "Memory": self.task_definition_memory,
            "NetworkMode": self.network_mode,
            "TaskRoleArn": task_role_arn,
            "ContainerDefinitions": [self.generate_container_definition()],
        }

    def create_task_definition(self):
        t = self.template
        self.task_definition = t.add_resource(
            ecs.TaskDefinition("TaskDefinition",
                               **self.generate_task_definition_kwargs()))
        self.add_output("TaskDefinitionArn", self.task_definition.Ref())

    def create_template(self):
        # Role must exist before the policy and the task definition
        # reference it.
        self.create_task_role()
        self.create_task_role_policy()
        self.create_task_definition()
def test_troposphere_type_no_from_dict(self):
    """A type without from_dict support is rejected, single or many."""
    with self.assertRaises(ValueError):
        TroposphereType(object)

    with self.assertRaises(ValueError):
        TroposphereType(object, many=True)
class ElasticFileSystem(Blueprint):
    """Creates an EFS filesystem plus one mount target per subnet.

    Subnets may be supplied either as a list (``Subnets``) or as a
    comma-separated string (``SubnetsStr``) -- exactly one of the two.
    Security groups for the mount targets may be created here
    (``SecurityGroups``) and/or supplied as existing ids
    (``ExtraSecurityGroups``); between one and five total are required.
    """

    VARIABLES = {
        'FileSystem': {
            'type': TroposphereType(efs.FileSystem),
            'description': 'A dictionary of the FileSystem to create. The key '
                           'being the CFN logical resource name, the '
                           'value being a dictionary of attributes for '
                           'the troposphere efs.FileSystem type.',
        },
        'VpcId': {
            'type': str,
            'description': 'VPC ID to create resources'
        },
        'Tags': {
            'type': dict,
            'description': 'Tags to associate with the created resources',
            'default': {}
        },
        'Subnets': {
            'type': list,
            'description': 'List of subnets to deploy private mount targets '
                           'in. Can not be used together with SubnetsStr. You'
                           'must choose only one way to inform this parameter',
            'default': []
        },
        'SubnetsStr': {
            'type': str,
            'description': 'A comma sepparated list of subnets to deploy '
                           'private mount targets in. Can not be used in '
                           'addition to Subnets, you must choose only one way'
                           'to inform this parameter',
            'default': ''
        },
        'IpAddresses': {
            'type': list,
            'description': 'List of IP addresses to assign to mount targets. '
                           'Omit or make empty to assign automatically. '
                           'Corresponds to Subnets listed in the same order.',
            'default': []
        },
        'SecurityGroups': {
            'type': TroposphereType(ec2.SecurityGroup, many=True,
                                    optional=True, validate=False),
            'description': "Dictionary of titles to SecurityGroups "
                           "definitions to be created and assigned to this "
                           "filesystem's MountTargets. "
                           "The VpcId property will be filled automatically, "
                           "so it should not be included. \n"
                           "The IDs of the created groups will be exported as "
                           "a comma-separated list in the "
                           "EfsNewSecurityGroupIds output.\n"
                           "Omit this parameter or set it to an empty "
                           "dictionary to not create any groups. In that "
                           "case the ExistingSecurityGroups variable must not "
                           "be empty",
            'default': {}
        },
        'ExtraSecurityGroups': {
            'type': list,
            'description': "List of existing SecurityGroup IDs to be asigned "
                           "to this filesystem's MountTargets",
            'default': []
        }
    }

    def get_subnets_from_string_list(self):
        """Return SubnetsStr split on commas, with empty entries dropped.

        Bug fix: this previously returned a lazy ``filter`` object,
        which under Python 3 breaks ``len()`` in validate_efs_subnets
        and ``+=`` in create_efs_mount_targets; it now returns a list.
        """
        v = self.get_variables()
        return [subnet for subnet in v['SubnetsStr'].split(',')
                if subnet != '']

    def validate_efs_security_groups(self):
        """Require between one and five total security groups."""
        validator = '{}.{}'.format(type(self).__name__,
                                   'validate_efs_security_groups')
        v = self.get_variables()
        # SecurityGroups is optional and may resolve to None.
        count = len(v['SecurityGroups'] or []) + len(v['ExtraSecurityGroups'])

        if count == 0:
            raise ValidatorError(
                'SecurityGroups,ExtraSecurityGroups', validator, count,
                'At least one SecurityGroup must be provided')
        elif count > 5:
            raise ValidatorError(
                'SecurityGroups,ExtraSecurityGroups', validator, count,
                'At most five total SecurityGroups must be provided')

    def validate_efs_subnets(self):
        """Require exactly one of Subnets/SubnetsStr; check IP count."""
        validator = '{}.{}'.format(type(self).__name__, 'validate_efs_subnets')
        v = self.get_variables()

        subnet_count = len(v['Subnets'])
        subnet_str_count = len(self.get_subnets_from_string_list())
        if not subnet_count and not subnet_str_count:
            variables = {
                'Subnets': v['Subnets'],
                'SubnetsStr': v['SubnetsStr']
            }
            raise ValidatorError(
                'Subnets', validator, variables,
                'At least one Subnet or SubnetStr must be provided')

        if subnet_count and subnet_str_count:
            variables = {
                'Subnets': v['Subnets'],
                'SubnetsStr': v['SubnetsStr']
            }
            raise ValidatorError(
                'Subnets and SubnetsStr', validator, variables,
                'Only one of Subnet or SubnetStr can be provided')

        ip_count = len(v['IpAddresses'])
        if ip_count and ip_count != max(subnet_count, subnet_str_count):
            raise ValidatorError(
                'IpAddresses', validator, v['IpAddresses'],
                'The number of IpAddresses must match the number of Subnets')

    def resolve_variables(self, provided_variables):
        super(ElasticFileSystem, self).resolve_variables(provided_variables)

        self.validate_efs_security_groups()
        self.validate_efs_subnets()

    def prepare_efs_security_groups(self):
        """Create requested groups; return all group ids to attach."""
        t = self.template
        v = self.get_variables()

        created_groups = []
        for sg in v['SecurityGroups'] or {}:
            sg.VpcId = v['VpcId']
            sg.Tags = merge_tags(v['Tags'], getattr(sg, 'Tags', {}))
            sg = t.add_resource(sg)
            created_groups.append(sg)

        created_group_ids = list(map(Ref, created_groups))
        t.add_output(Output(
            'EfsNewSecurityGroupIds',
            Value=Join(',', created_group_ids)))

        groups_ids = created_group_ids + v['ExtraSecurityGroups']
        return groups_ids

    def create_efs_filesystem(self):
        """Add the user-defined FileSystem resource and output its id."""
        t = self.template
        v = self.get_variables()

        fs = v.get('FileSystem')
        # This is a major hack to inject extra tags in efs resources
        fs.FileSystemTags = merge_tags(v['Tags'], getattr(fs, 'Tags', {}))
        fs = t.add_resource(fs)
        t.add_output(Output(
            'EfsFileSystemId',
            Value=Ref(fs)))

        return fs

    def create_efs_mount_targets(self, fs):
        """Add one MountTarget per subnet, optionally with fixed IPs."""
        t = self.template
        v = self.get_variables()

        groups = self.prepare_efs_security_groups()
        # Only one of the two subnet sources is non-empty (validated),
        # so concatenating them yields the effective subnet list.
        subnets = self.get_subnets_from_string_list()
        subnets += v['Subnets']
        ips = v['IpAddresses']

        mount_targets = []
        for i, subnet in enumerate(subnets):
            mount_target = efs.MountTarget(
                'EfsMountTarget{}'.format(i + 1),
                FileSystemId=Ref(fs),
                SubnetId=subnet,
                SecurityGroups=groups)
            if ips:
                mount_target.IpAddress = ips[i]
            mount_target = t.add_resource(mount_target)
            mount_targets.append(mount_target)

        t.add_output(Output(
            'EfsMountTargetIds',
            Value=Join(',', list(map(Ref, mount_targets)))))

    def create_template(self):
        fs = self.create_efs_filesystem()
        self.create_efs_mount_targets(fs)
def test_troposphere_type_create(self):
    """A single (many=False) TroposphereType builds one titled resource.

    Bug fix: the property check previously used
    ``assertTrue(created.properties[...], "test-bucket")`` -- the second
    argument of assertTrue is a *message*, so the assertion passed for
    any truthy value.  It now asserts equality.
    """
    troposphere_type = TroposphereType(s3.Bucket)
    created = troposphere_type.create(
        {"MyBucket": {"BucketName": "test-bucket"}})
    self.assertTrue(isinstance(created, s3.Bucket))
    self.assertEqual(created.properties["BucketName"], "test-bucket")
class VPC2(Blueprint):
    """This is a stripped down version of the VPC Blueprint.

    Creates the VPC, an attached internet gateway, an optional internal
    Route53 zone, and DHCP options wired to that zone's domain name.
    """

    VARIABLES = {
        "VPC": {
            "type": TroposphereType(ec2.VPC),
        },
        "InternalZone": {
            # optional=True: resolves to None when no zone is requested.
            "type": TroposphereType(route53.HostedZone, optional=True),
            "description": "The config for an internal zone. If provided, "
                           "the zone will be created with the VPCs setting "
                           "set to this VPC.",
            "default": None,
        },
    }

    def create_vpc(self):
        """Add the VPC resource and output its id plus key attributes."""
        t = self.template
        variables = self.get_variables()
        self.vpc = t.add_resource(variables["VPC"])
        t.add_output(Output("VpcId", Value=self.vpc.Ref()))
        attrs = [
            "CidrBlock", "CidrBlockAssociations", "DefaultNetworkAcl",
            "DefaultSecurityGroup", "Ipv6CidrBlocks"
        ]
        for attr in attrs:
            t.add_output(Output(attr, Value=self.vpc.GetAtt(attr)))

    def create_internet_gateway(self):
        """Create an internet gateway and attach it to the VPC."""
        t = self.template
        self.gateway = t.add_resource(ec2.InternetGateway("InternetGateway"))
        t.add_output(Output(
            "InternetGatewayId",
            Value=self.gateway.Ref(),
        ))

        self.gateway_attachment = t.add_resource(
            ec2.VPCGatewayAttachment(
                "VPCGatewayAttachment",
                VpcId=self.vpc.Ref(),
                InternetGatewayId=self.gateway.Ref(),
            ))
        t.add_output(
            Output(
                "VPCGatewayAttachmentId",
                Value=self.gateway_attachment.Ref(),
            ))

    def create_internal_zone(self):
        """Create the internal hosted zone, when one was configured.

        Always sets ``self.zone`` (possibly None) -- later methods
        consult it.
        """
        t = self.template
        variables = self.get_variables()
        self.zone = variables["InternalZone"]

        if self.zone:
            # Associate the zone with this VPC in the current region.
            hosted_zone_vpc = route53.HostedZoneVPCs(
                VPCId=self.vpc.Ref(),
                VPCRegion=Region,
            )
            self.zone.VPCs = [hosted_zone_vpc, ]

            t.add_resource(self.zone)
            t.add_output(Output(
                "InternalZoneId",
                Value=self.zone.Ref(),
            ))
            t.add_output(Output(
                "InternalZoneName",
                Value=self.zone.Name,
            ))

    def create_dhcp_options(self):
        """Create DHCP options and associate them with the VPC.

        Must run after create_internal_zone: the zone's name, when one
        exists, becomes the DHCP search domain.
        """
        t = self.template

        search_path = NoValue
        if self.zone:
            search_path = self.zone.Name

        self.dhcp_options = t.add_resource(
            ec2.DHCPOptions(
                "DHCPOptions",
                DomainName=search_path,
                DomainNameServers=[
                    "AmazonProvidedDNS",
                ],
            ))
        t.add_output(Output(
            "DHCPOptionsId",
            Value=self.dhcp_options.Ref(),
        ))

        self.dhcp_association = t.add_resource(
            ec2.VPCDHCPOptionsAssociation(
                "VPCDHCPOptionsAssociation",
                VpcId=self.vpc.Ref(),
                DhcpOptionsId=self.dhcp_options.Ref(),
            ))
        t.add_output(
            Output(
                "VPCDHCPOptionsAssociation",
                Value=self.dhcp_association.Ref(),
            ))

    def create_template(self):
        # Order matters: later steps read self.vpc and self.zone.
        self.create_vpc()
        self.create_internet_gateway()
        self.create_internal_zone()
        self.create_dhcp_options()
class Streams(Blueprint):
    """Creates Kinesis streams plus optional read/write IAM policies."""

    VARIABLES = {
        "Streams": {
            "type": TroposphereType(kinesis.Stream, many=True),
            "description": "A dictionary of streams to create. The key "
                           "being the CFN logical resource name, the "
                           "value being a dictionary of attributes for "
                           "the troposphere kinesis.Stream type.",
        },
        "ReadWriteRoles": {
            "type": list,
            "description": "A list of roles that should have read/write "
                           "access to the stream created.",
            "default": []
        },
        "ReadRoles": {
            "type": list,
            "description": "A list of roles that should have read-only "
                           "access to the streams created.",
            "default": []
        },
    }

    def create_template(self):
        template = self.template
        variables = self.get_variables()

        stream_refs = []
        for stream in variables["Streams"]:
            resource = template.add_resource(stream)
            template.add_output(
                Output("%sStreamId" % stream.title, Value=resource.Ref()))
            template.add_output(
                Output("%sStreamArn" % stream.title,
                       Value=resource.GetAtt("Arn")))
            stream_refs.append(resource.Ref())

        stream_arns = [kinesis_stream_arn(ref) for ref in stream_refs]

        # Optional stack-wide policies granting roles access to every
        # stream created above.
        if variables["ReadWriteRoles"]:
            template.add_resource(
                iam.PolicyType(
                    "ReadWritePolicy",
                    PolicyName=Sub("${AWS::StackName}ReadWritePolicy"),
                    PolicyDocument=read_write_kinesis_stream_policy(
                        stream_arns
                    ),
                    Roles=variables["ReadWriteRoles"],
                )
            )

        if variables["ReadRoles"]:
            template.add_resource(
                iam.PolicyType(
                    "ReadPolicy",
                    PolicyName=Sub("${AWS::StackName}ReadPolicy"),
                    PolicyDocument=read_only_kinesis_stream_policy(
                        stream_arns
                    ),
                    Roles=variables["ReadRoles"],
                )
            )
class SimpleECSService(Blueprint):
    """A simple ECS service: an IAM task role, an optional managed policy,
    a single-container task definition, and the EC2-launch-type service.
    """

    VARIABLES = {
        "ServiceName": {
            "type": str,
            "description": "A simple name for the service.",
        },
        "Image": {
            "type": str,
            "description": "The docker image to use for the task.",
        },
        "Command": {
            "type": list,
            "description": "A list of the command and it's arguments to run "
                           "inside the container. If not provided, will "
                           "default to the default command defined in the "
                           "image.",
            "default": [],
        },
        "Cluster": {
            "type": str,
            "description": "The name or Amazon Resource Name (ARN) of the "
                           "ECS cluster that you want to run your tasks on.",
        },
        "CPU": {
            "type": int,
            "description": "The relative CPU shares used by each instance of "
                           "the task.",
        },
        "Memory": {
            "type": int,
            "description": "The amount of memory (in megabytes) to reserve "
                           "for each instance of the task.",
        },
        "Count": {
            "type": int,
            "description": "The number of instances of the task to create.",
            "default": 1,
        },
        "Environment": {
            "type": dict,
            "description": "A dictionary representing the environment of the "
                           "task.",
            "default": {},
        },
        "LogConfiguration": {
            "type": TroposphereType(ecs.LogConfiguration, optional=True),
            "description": "An optional log configuration object.",
            "default": None,
        },
    }

    @property
    def service_name(self):
        return self.get_variables()["ServiceName"]

    @property
    def image(self):
        return self.get_variables()["Image"]

    @property
    def command(self):
        # Fall back to the image's default command when none is given.
        return self.get_variables()["Command"] or NoValue

    @property
    def cluster(self):
        return self.get_variables()["Cluster"]

    @property
    def cpu(self):
        return self.get_variables()["CPU"]

    @property
    def memory(self):
        return self.get_variables()["Memory"]

    @property
    def count(self):
        return self.get_variables()["Count"]

    @property
    def environment(self):
        """The Environment variable as a list of ecs.Environment objects,
        or NoValue when the dictionary is empty.
        """
        env_dict = self.get_variables()["Environment"]
        if not env_dict:
            return NoValue
        env_list = []
        for k, v in env_dict.items():
            env_list.append(ecs.Environment(Name=str(k), Value=str(v)))
        return env_list

    @property
    def log_configuration(self):
        log_config = self.get_variables()["LogConfiguration"]
        return log_config or NoValue

    def add_output(self, key, value):
        """Shorthand for adding a single template output."""
        self.template.add_output(Output(key, Value=value))

    def create_role(self):
        """Create the IAM role the task will assume."""
        t = self.template

        self.role = t.add_resource(
            iam.Role(
                "Role",
                AssumeRolePolicyDocument=get_ecs_task_assumerole_policy(),
                Path="/",
            )
        )

        self.add_output("RoleName", self.role.Ref())
        self.add_output("RoleArn", self.role.GetAtt("Arn"))
        self.add_output("RoleId", self.role.GetAtt("RoleId"))

    def generate_policy_document(self):
        """Hook for subclasses: return an IAM policy document for the task
        role, or None (the default) to skip creating a policy.
        """
        return None

    def create_policy(self):
        """Attach a managed policy to the task role, when a policy document
        is provided by generate_policy_document().
        """
        t = self.template

        policy_doc = self.generate_policy_document()
        if not policy_doc:
            return

        self.policy = t.add_resource(
            iam.ManagedPolicy(
                "ManagedPolicy",
                # Bug fix: previously called the non-existent
                # self.generate_policy(); use the document computed above.
                PolicyDocument=policy_doc,
                Roles=[self.role.Ref()],
            )
        )

        self.add_output("ManagedPolicyArn", self.policy.Ref())

    def generate_container_definition(self):
        """Build the single container definition for the task."""
        return ecs.ContainerDefinition(
            Command=self.command,
            Cpu=self.cpu,
            Environment=self.environment,
            Essential=True,
            Image=self.image,
            LogConfiguration=self.log_configuration,
            Memory=self.memory,
            Name=self.service_name,
        )

    def create_task_definition(self):
        """Create the ECS task definition wrapping the container."""
        t = self.template

        self.task_definition = t.add_resource(
            ecs.TaskDefinition(
                "TaskDefinition",
                Cpu=str(self.cpu),
                Family=self.service_name,
                Memory=str(self.memory),
                TaskRoleArn=self.role.GetAtt("Arn"),
                ContainerDefinitions=[self.generate_container_definition()]
            )
        )

        self.add_output("TaskDefinitionArn", self.task_definition.Ref())

    def create_service(self):
        """Create the ECS service running the task definition."""
        t = self.template
        self.service = t.add_resource(
            ecs.Service(
                "Service",
                Cluster=self.cluster,
                DesiredCount=self.count,
                LaunchType="EC2",
                ServiceName=self.service_name,
                TaskDefinition=self.task_definition.Ref(),
            )
        )

        self.add_output("ServiceArn", self.service.Ref())
        self.add_output("ServiceName", self.service.GetAtt("Name"))

    def create_template(self):
        """Build the full service template."""
        self.create_role()
        self.create_policy()
        self.create_task_definition()
        self.create_service()
class Network(Blueprint):
    """A single subnet ("network") inside a VPC, with its route table, an
    optional NAT gateway, an optional default route through either an
    internet or NAT gateway, and any extra user-supplied routes.
    """

    VARIABLES = {
        "VpcId": {
            "type": str,
            "description": "The Id of the VPC to create the network in.",
        },
        "InternetGatewayId": {
            "type": str,
            "description": "If defined, this network will be a public "
                           "network, and the default route will be through "
                           "the internet gateway. This and NatGatewayId are "
                           "mutually exclusive and cannot be set on the same "
                           "stack.",
            "default": "",
        },
        "CreateNatGateway": {
            "type": bool,
            "description": "If set to true, and no NatGatewayId is specified "
                           "a NatGateway will be created.",
            "default": False,
        },
        "NatGatewayId": {
            "type": str,
            "description": "If defined, this network will be a private "
                           "network, and the default route will be through "
                           "the nat gateway. This and InternetGatewayId are "
                           "mutually exclusive and cannot be set on the same "
                           "stack.",
            "default": "",
        },
        "AvailabilityZone": {
            "type": str,
            "description": "The Availability Zone to create the network in.",
        },
        "CidrBlock": {
            "type": str,
            "description": "The cidr network range to assign the subnet.",
        },
        "Tags": {
            "type": dict,
            "description": "A dictionary of tag key/values to add to all "
                           "resources that accept tags.",
            "default": {},
        },
        "Routes": {
            "type": TroposphereType(ec2.Route, many=True),
            "description": "A list of dictionary of ec2.Routes but without "
                           "RouteTableId to create in the default route "
                           "table.",
            "default": {},
        },
    }

    @property
    def vpc_id(self):
        return self.get_variables()["VpcId"]

    @property
    def network_type(self):
        """Either "public" (internet gateway present) or "private"."""
        if self.internet_gateway_id is not NoValue:
            return "public"
        return "private"

    @property
    def internet_gateway_id(self):
        # Empty string (the default) maps to NoValue.
        return self.get_variables()["InternetGatewayId"] or NoValue

    @property
    def nat_gateway_id(self):
        # Empty string (the default) maps to NoValue.
        return self.get_variables()["NatGatewayId"] or NoValue

    @property
    def availability_zone(self):
        return self.get_variables()["AvailabilityZone"]

    @property
    def cidr_block(self):
        return self.get_variables()["CidrBlock"]

    @property
    def tags(self):
        """Tags for all taggable resources; user tags override NetworkType."""
        tag_dict = {"NetworkType": self.network_type}
        tag_dict.update(self.get_variables()["Tags"])
        return Tags(**tag_dict)

    @property
    def routes(self):
        return self.get_variables()["Routes"]

    def create_subnet(self):
        """Create the subnet and export its id and attributes."""
        template = self.template

        self.subnet = template.add_resource(
            ec2.Subnet(
                "Subnet",
                VpcId=self.vpc_id,
                AvailabilityZone=self.availability_zone,
                CidrBlock=self.cidr_block,
                Tags=self.tags,
            ))

        template.add_output(Output("SubnetId", Value=self.subnet.Ref()))
        template.add_output(Output("NetworkType", Value=self.network_type))
        template.add_output(Output("CidrBlock", Value=self.cidr_block))

        for attribute in ("AvailabilityZone", "NetworkAclAssociationId",
                          "VpcId"):
            template.add_output(
                Output(attribute, Value=self.subnet.GetAtt(attribute)))

        # List-valued attributes are joined into a single comma-separated
        # output value.
        for attribute in ("Ipv6CidrBlocks",):
            template.add_output(
                Output(attribute,
                       Value=Join(",", self.subnet.GetAtt(attribute))))

    def create_route_table(self):
        """Create the route table and associate it with the subnet."""
        template = self.template

        self.route_table = template.add_resource(
            ec2.RouteTable(
                "RouteTable",
                VpcId=self.vpc_id,
                Tags=self.tags,
            ))
        template.add_output(
            Output("RouteTableId", Value=self.route_table.Ref()))

        self.route_table_assoc = template.add_resource(
            ec2.SubnetRouteTableAssociation(
                "SubnetRouteTableAssociation",
                SubnetId=self.subnet.Ref(),
                RouteTableId=self.route_table.Ref(),
            ))
        template.add_output(
            Output("SubnetRouteTableAssociationId",
                   Value=self.route_table_assoc.Ref()))

    def create_nat_gateway(self):
        """Create a NAT gateway (with its EIP) when requested and no
        external NAT gateway id was supplied.
        """
        template = self.template
        variables = self.get_variables()

        if variables["NatGatewayId"] or not variables["CreateNatGateway"]:
            return

        self.nat_gateway_eip = template.add_resource(
            ec2.EIP("NatGatewayEIP", Domain="vpc"))
        template.add_output(
            Output("NatGatewayEIP", Value=self.nat_gateway_eip.Ref()))
        template.add_output(
            Output("NatGatewayEIPAllocationId",
                   Value=self.nat_gateway_eip.GetAtt("AllocationId")))

        self.nat_gateway = template.add_resource(
            ec2.NatGateway(
                "NatGateway",
                AllocationId=self.nat_gateway_eip.GetAtt("AllocationId"),
                SubnetId=self.subnet.Ref()))
        template.add_output(
            Output("NatGatewayId", Value=self.nat_gateway.Ref()))

    def create_default_route(self):
        """Create the 0.0.0.0/0 route through whichever gateway is set."""
        template = self.template

        if (self.internet_gateway_id is NoValue and
                self.nat_gateway_id is NoValue):
            # Don't create a default route if no gateway is provided
            # this is a totally private, unreachable network.
            return

        self.default_route = template.add_resource(
            ec2.Route(
                "DefaultRoute",
                RouteTableId=self.route_table.Ref(),
                DestinationCidrBlock="0.0.0.0/0",
                GatewayId=self.internet_gateway_id,
                NatGatewayId=self.nat_gateway_id,
            ))
        template.add_output(
            Output("DefaultRouteId", Value=self.default_route.Ref()))

    def create_extra_routes(self):
        """Add any user-supplied routes to the route table."""
        template = self.template

        for route in self.routes:
            title = route.title
            route.properties.update({"RouteTableId": self.route_table.Ref()})
            added_route = template.add_resource(route)
            template.add_output(Output(title, Value=added_route.Ref()))

    def validate_variables(self):
        """Reject mutually exclusive gateway configurations."""
        variables = self.get_variables()

        if (self.internet_gateway_id is not NoValue and
                self.nat_gateway_id is not NoValue):
            raise ValueError("Cannot specify both NatGatewayId and "
                             "InternetGatewayId in the same Network stack.")

        if (variables["CreateNatGateway"] and
                self.nat_gateway_id is not NoValue):
            raise ValueError("Cannot specify both CreateNatGateway as True "
                             "and NatGatewayId in the same Network stack.")

    def create_template(self):
        """Build the full network template."""
        self.validate_variables()
        self.create_subnet()
        self.create_route_table()
        self.create_nat_gateway()
        self.create_default_route()
        self.create_extra_routes()