Example #1
    def build_block_device(self):
        docker_volume = autoscaling.BlockDeviceMapping(
            DeviceName="/dev/sdh",
            Ebs=autoscaling.EBSBlockDevice(DeleteOnTermination=True,
                                           VolumeSize=Ref("DockerVolumeSize")))
        swap_volume = autoscaling.BlockDeviceMapping(
            DeviceName="/dev/sdi",
            Ebs=autoscaling.EBSBlockDevice(DeleteOnTermination=True,
                                           VolumeSize=Ref("SwapVolumeSize")))

        return [docker_volume, swap_volume]
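
The method above only builds and returns the mapping list; nothing is attached to a resource yet. As a rough, hypothetical sketch (a standalone function instead of a method, a placeholder AMI and instance type, and the DockerVolumeSize/SwapVolumeSize parameters implied by the Refs), the result could be wired into a launch configuration like this:

from troposphere import Parameter, Ref, Template, autoscaling


def build_block_device():
    # Standalone variant of the method above (no self), for illustration only.
    docker_volume = autoscaling.BlockDeviceMapping(
        DeviceName="/dev/sdh",
        Ebs=autoscaling.EBSBlockDevice(DeleteOnTermination=True,
                                       VolumeSize=Ref("DockerVolumeSize")))
    swap_volume = autoscaling.BlockDeviceMapping(
        DeviceName="/dev/sdi",
        Ebs=autoscaling.EBSBlockDevice(DeleteOnTermination=True,
                                       VolumeSize=Ref("SwapVolumeSize")))
    return [docker_volume, swap_volume]


template = Template()
template.add_parameter(Parameter("DockerVolumeSize", Type="Number"))
template.add_parameter(Parameter("SwapVolumeSize", Type="Number"))
template.add_resource(
    autoscaling.LaunchConfiguration(
        "ContainerInstances",
        ImageId="ami-00000000",    # placeholder AMI id
        InstanceType="t3.medium",  # placeholder instance type
        BlockDeviceMappings=build_block_device()))
print(template.to_json())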
Example #2
    def create_template(self) -> None:
        """Create template (main function called by Stacker)."""
        template = self.template
        template.add_version("2010-09-09")
        template.add_description(
            "Kubernetes workers via EKS - V1.0.0 "
            "- compatible with amazon-eks-node-v23+"
        )

        # Metadata
        template.add_metadata(
            {
                "AWS::CloudFormation::Interface": {
                    "ParameterGroups": [
                        {
                            "Label": {"default": "EKS Cluster"},
                            "Parameters": [
                                self.variables[i].name
                                for i in [
                                    "ClusterName",
                                    "ClusterControlPlaneSecurityGroup",
                                ]
                            ],
                        },
                        {
                            "Label": {"default": "Worker Node Configuration"},
                            "Parameters": [
                                self.variables[i].name
                                for i in [
                                    "NodeGroupName",
                                    "NodeAutoScalingGroupMinSize",
                                    "NodeAutoScalingGroupMaxSize",
                                    "UseDesiredInstanceCount",
                                    "NodeInstanceType",
                                    "NodeInstanceProfile",
                                    "NodeImageId",
                                    "NodeVolumeSize",
                                    "KeyName",
                                    "UseSpotInstances",
                                    "SpotBidPrice",
                                    "BootstrapArguments",
                                ]
                            ],
                        },
                        {
                            "Label": {"default": "Worker Network Configuration"},
                            "Parameters": [
                                self.variables[i].name for i in ["VpcId", "Subnets"]
                            ],
                        },
                    ]
                }
            }
        )

        # Conditions
        template.add_condition(
            "SetSpotPrice", Equals(self.variables["UseSpotInstances"].ref, "yes")
        )
        template.add_condition(
            "DesiredInstanceCountSpecified",
            Equals(self.variables["UseDesiredInstanceCount"].ref, "true"),
        )
        template.add_condition(
            "KeyNameSpecified", Not(Equals(self.variables["KeyName"].ref, ""))
        )

        # Resources
        nodesecuritygroup = template.add_resource(
            ec2.SecurityGroup(
                "NodeSecurityGroup",
                GroupDescription="Security group for all nodes in the cluster",
                Tags=[
                    {
                        "Key": Sub("kubernetes.io/cluster/${ClusterName}"),
                        "Value": "owned",
                    },
                ],
                VpcId=self.variables["VpcId"].ref,
            )
        )
        template.add_output(
            Output(
                "NodeSecurityGroup",
                Description="Security group for all nodes in the cluster",
                Value=nodesecuritygroup.ref(),
            )
        )
        template.add_resource(
            ec2.SecurityGroupIngress(
                "NodeSecurityGroupIngress",
                Description="Allow node to communicate with each other",
                GroupId=nodesecuritygroup.ref(),
                SourceSecurityGroupId=nodesecuritygroup.ref(),
                IpProtocol="-1",
                FromPort=0,
                ToPort=65535,
            )
        )
        template.add_resource(
            ec2.SecurityGroupIngress(
                "NodeSecurityGroupFromControlPlaneIngress",
                Description="Allow worker Kubelets and pods to receive "
                "communication from the cluster control plane",
                GroupId=nodesecuritygroup.ref(),
                SourceSecurityGroupId=self.variables[
                    "ClusterControlPlaneSecurityGroup"
                ].ref,
                IpProtocol="tcp",
                FromPort=1025,
                ToPort=65535,
            )
        )
        template.add_resource(
            ec2.SecurityGroupEgress(
                "ControlPlaneEgressToNodeSecurityGroup",
                Description="Allow the cluster control plane to communicate "
                "with worker Kubelet and pods",
                GroupId=self.variables["ClusterControlPlaneSecurityGroup"].ref,
                DestinationSecurityGroupId=nodesecuritygroup.ref(),
                IpProtocol="tcp",
                FromPort=1025,
                ToPort=65535,
            )
        )
        template.add_resource(
            ec2.SecurityGroupIngress(
                "NodeSecurityGroupFromControlPlaneOn443Ingress",
                Description="Allow pods running extension API servers on port "
                "443 to receive communication from cluster "
                "control plane",
                GroupId=nodesecuritygroup.ref(),
                SourceSecurityGroupId=self.variables[
                    "ClusterControlPlaneSecurityGroup"
                ].ref,
                IpProtocol="tcp",
                FromPort=443,
                ToPort=443,
            )
        )
        template.add_resource(
            ec2.SecurityGroupEgress(
                "ControlPlaneEgressToNodeSecurityGroupOn443",
                Description="Allow the cluster control plane to communicate "
                "with pods running extension API servers on port "
                "443",
                GroupId=self.variables["ClusterControlPlaneSecurityGroup"].ref,
                DestinationSecurityGroupId=nodesecuritygroup.ref(),
                IpProtocol="tcp",
                FromPort=443,
                ToPort=443,
            )
        )
        template.add_resource(
            ec2.SecurityGroupIngress(
                "ClusterControlPlaneSecurityGroupIngress",
                Description="Allow pods to communicate with the cluster API " "Server",
                GroupId=self.variables["ClusterControlPlaneSecurityGroup"].ref,
                SourceSecurityGroupId=nodesecuritygroup.ref(),
                IpProtocol="tcp",
                FromPort=443,
                ToPort=443,
            )
        )

        nodelaunchconfig = template.add_resource(
            autoscaling.LaunchConfiguration(
                "NodeLaunchConfig",
                AssociatePublicIpAddress=True,
                IamInstanceProfile=self.variables["NodeInstanceProfile"].ref,
                ImageId=self.variables["NodeImageId"].ref,
                InstanceType=self.variables["NodeInstanceType"].ref,
                KeyName=If("KeyNameSpecified", self.variables["KeyName"].ref, NoValue),
                SecurityGroups=[nodesecuritygroup.ref()],
                SpotPrice=If(
                    "SetSpotPrice", self.variables["SpotBidPrice"].ref, NoValue
                ),
                BlockDeviceMappings=[
                    autoscaling.BlockDeviceMapping(
                        DeviceName="/dev/xvda",
                        Ebs=autoscaling.EBSBlockDevice(
                            VolumeSize=self.variables["NodeVolumeSize"].ref,
                            VolumeType="gp2",
                            DeleteOnTermination=True,
                        ),
                    )
                ],
                UserData=Base64(
                    Sub(
                        "\n".join(
                            [
                                "#!/bin/bash",
                                "set -o xtrace",
                                "/etc/eks/bootstrap.sh ${ClusterName} ${BootstrapArguments}",
                                "/opt/aws/bin/cfn-signal --exit-code $? \\",
                                "--stack ${AWS::StackName} \\",
                                "--resource NodeGroup \\",
                                "--region ${AWS::Region}",
                            ]
                        )
                    )
                ),
            )
        )

        template.add_resource(
            autoscaling.AutoScalingGroup(
                "NodeGroup",
                DesiredCapacity=If(
                    "DesiredInstanceCountSpecified",
                    self.variables["NodeAutoScalingGroupMaxSize"].ref,
                    NoValue,
                ),
                LaunchConfigurationName=nodelaunchconfig.ref(),
                MinSize=self.variables["NodeAutoScalingGroupMinSize"].ref,
                MaxSize=self.variables["NodeAutoScalingGroupMaxSize"].ref,
                VPCZoneIdentifier=self.variables["Subnets"].ref,
                Tags=[
                    autoscaling.Tag(
                        "Name", Sub("${ClusterName}-${NodeGroupName}-Node"), True
                    ),
                    autoscaling.Tag(
                        Sub("kubernetes.io/cluster/${ClusterName}"), "owned", True
                    ),
                ],
                UpdatePolicy=UpdatePolicy(
                    AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                        MinInstancesInService="1", MaxBatchSize="1"
                    )
                ),
            )
        )
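
Worth calling out: the optional SpotPrice, KeyName, and DesiredCapacity properties above all rely on the same troposphere pattern, namely a template condition built with Equals plus If(condition, value, NoValue), so the property is dropped from the rendered template when it is not wanted. A stripped-down, hypothetical sketch of that pattern on its own (parameter and resource names invented for illustration):

from troposphere import Equals, If, Parameter, Ref, Template, autoscaling

template = Template()
template.add_parameter(Parameter("UseSpotInstances", Type="String", Default="no"))
template.add_parameter(Parameter("SpotBidPrice", Type="String", Default="0.50"))

# The condition is true only when the parameter is literally "yes".
template.add_condition("SetSpotPrice", Equals(Ref("UseSpotInstances"), "yes"))

template.add_resource(
    autoscaling.LaunchConfiguration(
        "ExampleLaunchConfig",
        ImageId="ami-00000000",   # placeholder
        InstanceType="m5.large",  # placeholder
        # Emitted only when SetSpotPrice is true; AWS::NoValue removes the
        # property from the rendered template otherwise.
        SpotPrice=If("SetSpotPrice", Ref("SpotBidPrice"), Ref("AWS::NoValue")),
    )
)
print(template.to_json())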
Example #3
instance_configuration_name = "LaunchConfiguration"

autoscaling_group_name = "AutoScalingGroup"

container_instance_configuration = autoscaling.LaunchConfiguration(
    instance_configuration_name,
    template=template,
    SecurityGroups=[Ref(container_security_group)],
    InstanceType=container_instance_type,
    ImageId=ami,
    IamInstanceProfile=Ref(container_instance_profile),
    BlockDeviceMappings=[
        autoscaling.BlockDeviceMapping(DeviceName="/dev/sda1",
                                       Ebs=autoscaling.EBSBlockDevice(
                                           VolumeType="gp2",
                                           VolumeSize=container_volume_size,
                                           Encrypted=use_aes256_encryption,
                                       )),
    ],
    KeyName=Ref(key_name),
)

autoscaling_group = autoscaling.AutoScalingGroup(
    autoscaling_group_name,
    template=template,
    VPCZoneIdentifier=[Ref(private_subnet_a),
                       Ref(private_subnet_b)],
    MinSize=desired_container_instances,
    MaxSize=max_container_instances,
    DesiredCapacity=desired_container_instances,
    LaunchConfigurationName=Ref(container_instance_configuration),
)
Example #4
 def build_block_device(self):
     volume = autoscaling.EBSBlockDevice(VolumeSize="50")
     return [
         autoscaling.BlockDeviceMapping(DeviceName="/dev/sdh", Ebs=volume)
     ]
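
Because every troposphere object serializes through to_dict(), a mapping like this can be inspected or unit-tested on its own, without rendering a whole template. A small illustrative check (expected output shown as a comment):

from troposphere import autoscaling

volume = autoscaling.EBSBlockDevice(VolumeSize="50")
mapping = autoscaling.BlockDeviceMapping(DeviceName="/dev/sdh", Ebs=volume)

# to_dict() returns the raw CloudFormation structure that would appear
# under a launch configuration's BlockDeviceMappings.
print(mapping.to_dict())
# {'DeviceName': '/dev/sdh', 'Ebs': {'VolumeSize': '50'}}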
Example #5
    def create_template(self):
        """Create template (main function called by Stacker)."""
        template = self.template
        variables = self.get_variables()
        template.add_version('2010-09-09')
        template.add_description('Kubernetes workers via EKS - V1.0.0 '
                                 '- compatible with amazon-eks-node-v23+')

        # Metadata
        template.add_metadata({
            'AWS::CloudFormation::Interface': {
                'ParameterGroups': [
                    {'Label': {'default': 'EKS Cluster'},
                     'Parameters': [variables[i].name
                                    for i
                                    in ['ClusterName',
                                        'ClusterControlPlaneSecurityGroup']]},
                    {'Label': {'default': 'Worker Node Configuration'},
                     'Parameters': [variables[i].name
                                    for i
                                    in ['NodeGroupName',
                                        'NodeAutoScalingGroupMinSize',
                                        'NodeAutoScalingGroupMaxSize',
                                        'UseDesiredInstanceCount',
                                        'NodeInstanceType',
                                        'NodeInstanceProfile',
                                        'NodeImageId',
                                        'NodeVolumeSize',
                                        'KeyName',
                                        'UseSpotInstances',
                                        'SpotBidPrice',
                                        'BootstrapArguments']]},
                    {'Label': {'default': 'Worker Network Configuration'},
                     'Parameters': [variables[i].name
                                    for i
                                    in ['VpcId', 'Subnets']]}
                ]
            }
        })

        # Conditions
        template.add_condition(
            'SetSpotPrice',
            Equals(variables['UseSpotInstances'].ref, 'yes')
        )
        template.add_condition(
            'DesiredInstanceCountSpecified',
            Equals(variables['UseDesiredInstanceCount'].ref, 'true')
        )
        template.add_condition(
            'KeyNameSpecified',
            Not(Equals(variables['KeyName'].ref, ''))
        )

        # Resources
        nodesecuritygroup = template.add_resource(
            ec2.SecurityGroup(
                'NodeSecurityGroup',
                GroupDescription='Security group for all nodes in the cluster',
                Tags=[
                    {'Key': Sub('kubernetes.io/cluster/${ClusterName}'),
                     'Value': 'owned'},
                ],
                VpcId=variables['VpcId'].ref
            )
        )
        template.add_output(
            Output(
                'NodeSecurityGroup',
                Description='Security group for all nodes in the cluster',
                Value=nodesecuritygroup.ref()
            )
        )
        template.add_resource(
            ec2.SecurityGroupIngress(
                'NodeSecurityGroupIngress',
                Description='Allow node to communicate with each other',
                GroupId=nodesecuritygroup.ref(),
                SourceSecurityGroupId=nodesecuritygroup.ref(),
                IpProtocol='-1',
                FromPort=0,
                ToPort=65535
            )
        )
        template.add_resource(
            ec2.SecurityGroupIngress(
                'NodeSecurityGroupFromControlPlaneIngress',
                Description='Allow worker Kubelets and pods to receive '
                            'communication from the cluster control plane',
                GroupId=nodesecuritygroup.ref(),
                SourceSecurityGroupId=variables['ClusterControlPlaneSecurityGroup'].ref,  # noqa
                IpProtocol='tcp',
                FromPort=1025,
                ToPort=65535
            )
        )
        template.add_resource(
            ec2.SecurityGroupEgress(
                'ControlPlaneEgressToNodeSecurityGroup',
                Description='Allow the cluster control plane to communicate '
                            'with worker Kubelet and pods',
                GroupId=variables['ClusterControlPlaneSecurityGroup'].ref,
                DestinationSecurityGroupId=nodesecuritygroup.ref(),
                IpProtocol='tcp',
                FromPort=1025,
                ToPort=65535
            )
        )
        template.add_resource(
            ec2.SecurityGroupIngress(
                'NodeSecurityGroupFromControlPlaneOn443Ingress',
                Description='Allow pods running extension API servers on port '
                            '443 to receive communication from cluster '
                            'control plane',
                GroupId=nodesecuritygroup.ref(),
                SourceSecurityGroupId=variables['ClusterControlPlaneSecurityGroup'].ref,  # noqa
                IpProtocol='tcp',
                FromPort=443,
                ToPort=443
            )
        )
        template.add_resource(
            ec2.SecurityGroupEgress(
                'ControlPlaneEgressToNodeSecurityGroupOn443',
                Description='Allow the cluster control plane to communicate '
                            'with pods running extension API servers on port '
                            '443',
                GroupId=variables['ClusterControlPlaneSecurityGroup'].ref,
                DestinationSecurityGroupId=nodesecuritygroup.ref(),
                IpProtocol='tcp',
                FromPort=443,
                ToPort=443
            )
        )
        template.add_resource(
            ec2.SecurityGroupIngress(
                'ClusterControlPlaneSecurityGroupIngress',
                Description='Allow pods to communicate with the cluster API '
                            'Server',
                GroupId=variables['ClusterControlPlaneSecurityGroup'].ref,
                SourceSecurityGroupId=nodesecuritygroup.ref(),
                IpProtocol='tcp',
                FromPort=443,
                ToPort=443
            )
        )

        nodelaunchconfig = template.add_resource(
            autoscaling.LaunchConfiguration(
                'NodeLaunchConfig',
                AssociatePublicIpAddress=True,
                IamInstanceProfile=variables['NodeInstanceProfile'].ref,
                ImageId=variables['NodeImageId'].ref,
                InstanceType=variables['NodeInstanceType'].ref,
                KeyName=If(
                    'KeyNameSpecified',
                    variables['KeyName'].ref,
                    NoValue
                ),
                SecurityGroups=[nodesecuritygroup.ref()],
                SpotPrice=If('SetSpotPrice',
                             variables['SpotBidPrice'].ref,
                             NoValue),
                BlockDeviceMappings=[autoscaling.BlockDeviceMapping(
                    DeviceName='/dev/xvda',
                    Ebs=autoscaling.EBSBlockDevice(
                        VolumeSize=variables['NodeVolumeSize'].ref,
                        VolumeType='gp2',
                        DeleteOnTermination=True
                    )
                )],
                UserData=Base64(
                    Sub('\n'.join([
                        '#!/bin/bash',
                        'set -o xtrace',
                        '/etc/eks/bootstrap.sh ${ClusterName} ${BootstrapArguments}',  # noqa
                        '/opt/aws/bin/cfn-signal --exit-code $? \\',
                        '--stack ${AWS::StackName} \\',
                        '--resource NodeGroup \\',
                        '--region ${AWS::Region}'
                    ]))
                )
            )
        )

        template.add_resource(
            autoscaling.AutoScalingGroup(
                'NodeGroup',
                DesiredCapacity=If(
                    'DesiredInstanceCountSpecified',
                    variables['NodeAutoScalingGroupMaxSize'].ref,
                    NoValue
                ),
                LaunchConfigurationName=nodelaunchconfig.ref(),
                MinSize=variables['NodeAutoScalingGroupMinSize'].ref,
                MaxSize=variables['NodeAutoScalingGroupMaxSize'].ref,
                VPCZoneIdentifier=variables['Subnets'].ref,
                Tags=[
                    autoscaling.Tag(
                        'Name',
                        Sub('${ClusterName}-${NodeGroupName}-Node'),
                        True),
                    autoscaling.Tag(
                        Sub('kubernetes.io/cluster/${ClusterName}'),
                        'owned',
                        True)
                ],
                UpdatePolicy=UpdatePolicy(
                    AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                        MinInstancesInService='1',
                        MaxBatchSize='1'
                    )
                )
            )
        )
Example #6
    def init_template(self):
        self.template.add_description(self.TEMPLATE_DESCRIPTION)

        ecs_cluster = self.template.add_resource(Cluster(self.CLUSTER_NAME))

        ecs_instance_role = self.template.add_resource(
            Role('sitInstanceRole',
                 Path='/',
                 AssumeRolePolicyDocument={
                     "Statement": [{
                         "Effect": "Allow",
                         "Principal": {
                             "Service": ["ec2.amazonaws.com"]
                         },
                         "Action": ["sts:AssumeRole"]
                     }]
                 }))

        ecs_instance_profile = self.template.add_resource(
            InstanceProfile('sitInstanceProfile',
                            Path='/',
                            Roles=[Ref(ecs_instance_role)]))

        ecs_instance_policy = self.template.add_resource(
            PolicyType('sitInstancePolicy',
                       PolicyName='ecs-policy',
                       Roles=[Ref(ecs_instance_role)],
                       PolicyDocument={
                           "Statement": [{
                               "Effect":
                               "Allow",
                               "Action": [
                                   "ecs:CreateCluster",
                                   "ecs:RegisterContainerInstance",
                                   "ecs:DeregisterContainerInstance",
                                   "ecs:DiscoverPollEndpoint", "ecs:Submit*",
                                   "ecs:Poll", "ecs:StartTelemetrySession",
                                   "ecr:GetAuthorizationToken",
                                   "ecr:BatchCheckLayerAvailability",
                                   "ecr:GetDownloadUrlForLayer",
                                   "ecr:BatchGetImage", "logs:CreateLogStream",
                                   "logs:PutLogEvents"
                               ],
                               "Resource":
                               "*"
                           }],
                       }))

        commands = {
            '01_add_instance_to_cluster': {
                'command':
                Join('', [
                    '#!/bin/bash\n', 'echo ECS_CLUSTER=',
                    Ref(ecs_cluster),
                    '$"\n"ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION=',
                    self.ECS_TASK_CLEANUP_WAIT, ' >> /etc/ecs/ecs.config'
                ])
            }
        }

        files = {
            "/etc/cfn/cfn-hup.conf": {
                "content":
                Join("", [
                    "[main]\n", "stack=",
                    Ref("AWS::StackId"), "\n", "region=",
                    Ref("AWS::Region"), "\n"
                ]),
                "mode":
                "000400",
                "owner":
                "root",
                "group":
                "root"
            },
            "/etc/cfn/hooks.d/cfn-auto-reloader.conf": {
                "content":
                Join("", [
                    "[cfn-auto-reloader-hook]\n", "triggers=post.update\n",
                    "path=Resources.{0}.Metadata.AWS::CloudFormation::Init\n".
                    format(self.LAUNCH_CONFIGURATION_NAME),
                    "action=/opt/aws/bin/cfn-init -v ", "         --stack ",
                    Ref("AWS::StackName"), "         --resource {0}".format(
                        self.LAUNCH_CONFIGURATION_NAME), "         --region ",
                    Ref("AWS::Region"), "\n", "runas=root\n"
                ])
            }
        }

        services = {
            "sysvinit": {
                "cfn-hup": {
                    "enabled":
                    "true",
                    "ensureRunning":
                    "true",
                    "files": [
                        "/etc/cfn/cfn-hup.conf",
                        "/etc/cfn/hooks.d/cfn-auto-reloader.conf"
                    ]
                }
            }
        }

        launch_configuration = self.template.add_resource(
            LaunchConfiguration(self.LAUNCH_CONFIGURATION_NAME,
                                ImageId=self.AMI_ID,
                                IamInstanceProfile=Ref(ecs_instance_profile),
                                InstanceType=self.INSTANCE_TYPE,
                                UserData=self.user_data.get_base64_data(),
                                AssociatePublicIpAddress=False,
                                SecurityGroups=self.SECURITY_GROUPS,
                                KeyName=self.KEY_NAME,
                                Metadata=autoscaling.Metadata(
                                    cloudformation.Init({
                                        "config":
                                        cloudformation.InitConfig(
                                            commands=commands,
                                            files=files,
                                            services=services)
                                    })),
                                BlockDeviceMappings=[
                                    autoscaling.BlockDeviceMapping(
                                        DeviceName=self.EBS_DEVICE_NAME,
                                        Ebs=autoscaling.EBSBlockDevice(
                                            DeleteOnTermination=True,
                                            VolumeSize=self.EBS_VOLUME_SIZE,
                                            VolumeType='gp2'))
                                ]))

        auto_scaling_group = self.template.add_resource(
            AutoScalingGroup(self.AUTOSCALING_GROUP_NAME,
                             MaxSize=self.MAX_SIZE,
                             MinSize=self.MIN_SIZE,
                             Cooldown=60,
                             LaunchConfigurationName=Ref(launch_configuration),
                             VPCZoneIdentifier=[self.SUBNET]))
        """ Scale UP Policy """
        scaling_up_policy = self.template.add_resource(
            ScalingPolicy('{0}ScaleUpPolicy'.format(
                self.AUTOSCALING_GROUP_NAME),
                          AdjustmentType='ChangeInCapacity',
                          AutoScalingGroupName=Ref(auto_scaling_group),
                          Cooldown=60,
                          ScalingAdjustment='1'))

        for alarm_name, alarm in self.AUTOSCALE_UP_ALARMS.items():
            """ Cloud Watch Alarm """
            self.template.add_resource(
                Alarm('{0}ScaleUp{1}'.format(self.AUTOSCALING_GROUP_NAME,
                                             alarm_name),
                      ActionsEnabled=True,
                      Namespace='AWS/ECS',
                      MetricName=alarm['scaling_metric'],
                      ComparisonOperator='GreaterThanOrEqualToThreshold',
                      Threshold=alarm['scale_up_threshold'],
                      EvaluationPeriods=1,
                      Statistic=alarm['statistic'],
                      Period=alarm['period'],
                      AlarmActions=[Ref(scaling_up_policy)],
                      Dimensions=[
                          MetricDimension(Name='ClusterName',
                                          Value=Ref(ecs_cluster))
                      ]))
        """ Scale DOWN Policy """
        scaling_down_policy = self.template.add_resource(
            ScalingPolicy('{0}ScaleDownPolicy'.format(
                self.AUTOSCALING_GROUP_NAME),
                          AdjustmentType='ChangeInCapacity',
                          AutoScalingGroupName=Ref(auto_scaling_group),
                          Cooldown=60,
                          ScalingAdjustment='-1'))

        for alarm_name, alarm in self.AUTOSCALE_DOWN_ALARMS.items():
            """ Cloud Watch Alarm """
            self.template.add_resource(
                Alarm('{0}ScaleDown{1}'.format(self.AUTOSCALING_GROUP_NAME,
                                               alarm_name),
                      ActionsEnabled=True,
                      Namespace='AWS/ECS',
                      MetricName=alarm['scaling_metric'],
                      ComparisonOperator='LessThanOrEqualToThreshold',
                      Threshold=alarm['scale_down_threshold'],
                      EvaluationPeriods=1,
                      Statistic=alarm['statistic'],
                      Period=alarm['period'],
                      AlarmActions=[Ref(scaling_down_policy)],
                      Dimensions=[
                          MetricDimension(Name='ClusterName',
                                          Value=Ref(ecs_cluster))
                      ]))
Example #7
    def __init__(self, title, UserDataApp, spot=None, **kwargs):
        super().__init__(title, **kwargs)

        if spot:
            AutoScalingGroupName = 'AutoScalingGroupSpot'
            self.Condition = 'SpotASG'
        else:
            AutoScalingGroupName = 'AutoScalingGroup'
        self.AssociatePublicIpAddress = get_endvalue(
            'AssociatePublicIpAddress')
        self.BlockDeviceMappings = [
            asg.BlockDeviceMapping(DeviceName='/dev/xvda',
                                   Ebs=asg.EBSBlockDevice(
                                       VolumeSize=get_endvalue('VolumeSize'),
                                       VolumeType=get_endvalue('VolumeType'),
                                   )),
            If(
                'AdditionalStorage',
                asg.BlockDeviceMapping(
                    DeviceName=get_endvalue('AdditionalStorageName'),
                    Ebs=asg.EBSBlockDevice(
                        VolumeSize=get_endvalue('AdditionalStorageSize'),
                        VolumeType=get_endvalue('AdditionalStorageType'),
                    )), Ref('AWS::NoValue')),
            If(
                'InstaceEphemeral0',
                asg.BlockDeviceMapping(DeviceName='/dev/xvdb',
                                       VirtualName='ephemeral0'),
                Ref('AWS::NoValue')),
            If(
                'InstaceEphemeral1',
                asg.BlockDeviceMapping(DeviceName='/dev/xvdc',
                                       VirtualName='ephemeral1'),
                Ref('AWS::NoValue')),
            If(
                'InstaceEphemeral2',
                asg.BlockDeviceMapping(DeviceName='/dev/xvdd',
                                       VirtualName='ephemeral2'),
                Ref('AWS::NoValue')),
        ]
        self.IamInstanceProfile = Ref('InstanceProfile')
        self.ImageId = If(
            'ImageIdLatest',
            Ref('ImageIdLatest'),
            get_endvalue('ImageId'),
        ) if 'ImageIdLatest' in cfg.Parameter else get_endvalue('ImageId')
        self.InstanceMonitoring = get_endvalue('InstanceMonitoring')
        self.InstanceType = get_endvalue('InstanceType')
        self.KeyName = get_endvalue('KeyName')
        self.SecurityGroups = [
            GetAtt('SecurityGroupInstancesRules', 'GroupId'),
        ]
        self.SpotPrice = If('SpotPrice', get_endvalue('SpotPrice'),
                            Ref('AWS::NoValue'))
        self.UserData = Base64(
            Join('', [
                '#!/bin/bash\n',
                'PATH=/opt/aws/bin:$PATH\n',
                'export BASH_ENV=/etc/profile.d/ibox_env.sh\n',
                'export ENV=$BASH_ENV\n',
                'yum -C list installed aws-cfn-bootstrap || '
                'yum install -y aws-cfn-bootstrap\n',
                Sub(''.join(UserDataApp)),
                'cfn-init -v',
                ' --stack ',
                Ref('AWS::StackName'),
                ' --role ',
                Ref('RoleInstance'),
                ' --resource LaunchConfiguration',
                ' --region ',
                Ref('AWS::Region'),
                '\n',
                If(
                    'DoNotSignal', Ref('AWS::NoValue'),
                    Sub('cfn-signal -e $? --stack ${AWS::StackName} '
                        '--role ${RoleInstance} '
                        f'--resource {AutoScalingGroupName} '
                        '--region ${AWS::Region}\n')),
                'rm /var/lib/cloud/instance/sem/config_scripts_user\n',
            ]))