Esempio n. 1
0
 def test_sub_without_vars(self):
     """A template plus keyword substitution vars serialises to the two-element Fn::Sub form."""
     # NOTE(review): despite the name, this test DOES pass substitution
     # vars; it looks swapped with test_sub_with_vars — confirm intent.
     template = 'foo ${AWS::Region} ${sub1} ${sub2}'
     mapping = {'sub1': 'uno', 'sub2': 'dos'}
     rendered = Sub(template, **mapping).to_dict()
     self.assertEqual({'Fn::Sub': [template, mapping]}, rendered)
Esempio n. 2
0
 def test_sub_with_vars_mix(self):
     """Positional dict vars and keyword vars merge into one Fn::Sub variable map."""
     template = 'foo ${AWS::Region} ${sub1} ${sub2} ${sub3}'
     positional = {'sub1': 'uno', 'sub2': 'dos'}
     rendered = Sub(template, positional, sub3='tres').to_dict()
     merged = {
         'sub1': 'uno',
         'sub2': 'dos',
         'sub3': 'tres',
     }
     self.assertEqual({'Fn::Sub': [template, merged]}, rendered)
            ComparisonOperator=value['operator'],
            AlarmActions=[
                Ref("{}{}".format(value['alarmPrefix'], reservation))]
        ))
        t.add_resource(ScalingPolicy(
            "{}{}".format(value['alarmPrefix'], reservation),
            ScalingAdjustment=value['adjustment'],
            AutoScalingGroupName=Ref("ECSAutoScalingGroup"),
            AdjustmentType="ChangeInCapacity",
        ))

# Stack outputs: export the cluster name, VPC id and public subnet list so
# downstream stacks can consume them via Fn::ImportValue.
t.add_output(Output(
    "Cluster",
    Description="ECS Cluster Name",
    Value=Ref("ECSCluster"),
    Export=Export(Sub("${AWS::StackName}-id")),
))

t.add_output(Output(
    "VpcId",
    Description="VpcId",
    Value=Ref("VpcId"),
    Export=Export(Sub("${AWS::StackName}-vpc-id")),
))

# PublicSubnet is a list parameter/resource; Join flattens it to a
# comma-separated string because exports must be scalar.
t.add_output(Output(
    "PublicSubnet",
    Description="PublicSubnet",
    Value=Join(',', Ref("PublicSubnet")),
    Export=Export(Sub("${AWS::StackName}-public-subnets")),
))
Esempio n. 4
0
 def test_sub_with_vars(self):
     """A template with no substitution vars serialises as a bare Fn::Sub string."""
     # NOTE(review): the name says "with vars" but none are passed; this
     # looks swapped with test_sub_without_vars — confirm intent.
     rendered = Sub('foo ${AWS::Region}').to_dict()
     self.assertEqual({'Fn::Sub': 'foo ${AWS::Region}'}, rendered)
Esempio n. 5
0
def create_registry(family, namespace, port_config, settings):
    """
    Creates the settings for the ECS Service Registries and adds the resources
    to the appropriate template.

    :param ecs_composex.ecs.ecs_family.ComposeFamily family:
    :param ecs_composex.cloudmap.cloudmap_stack.PrivateNamespace namespace:
    :param dict port_config: port definition; must contain a "target" key
    :param ecs_composex.common.settings.ComposeXSettings settings:
    :raises AttributeError: when the namespace is neither newly created nor
        resolved through lookup
    """
    if family.ecs_service.registries:
        # A family can only be registered against one CloudMap namespace.
        # Fix: Logger.warn() is a deprecated alias; use Logger.warning().
        LOG.warning(
            f"{family.name} already has a CloudMap mapping. "
            f"Only one can be set. Ignoring mapping to {namespace.name}"
        )
        return
    if namespace.cfn_resource:
        # Namespace created by this deployment: expose its ID as a stack
        # parameter and wire the value through the parent stack.
        add_parameters(
            family.template,
            [
                namespace.attributes_outputs[PRIVATE_NAMESPACE_ID]
                ["ImportParameter"]
            ],
        )
        family.stack.Parameters.update({
            namespace.attributes_outputs[PRIVATE_NAMESPACE_ID]["ImportParameter"].title:
            namespace.attributes_outputs[PRIVATE_NAMESPACE_ID]["ImportValue"]
        })
        namespace_id = Ref(namespace.attributes_outputs[PRIVATE_NAMESPACE_ID]
                           ["ImportParameter"])
    elif namespace.lookup_properties:
        # Pre-existing namespace resolved via lookup: import its mapping.
        add_update_mapping(
            family.template,
            namespace.module.mapping_key,
            settings.mappings[namespace.module.mapping_key],
        )
        namespace_id = namespace.attributes_outputs[PRIVATE_NAMESPACE_ID][
            "ImportValue"]
    else:
        raise AttributeError(
            f"{namespace.module.res_key}.{namespace.name} - Cannot define if new or lookup !?"
        )

    # A + SRV records with multivalue routing. The namespace is attached on
    # the service itself, so DnsConfig.NamespaceId is explicitly AWS::NoValue.
    sd_service = SdService(
        f"{namespace.logical_name}EcsServiceDiscovery{family.logical_name}",
        Description=Sub(f"{family.name} service"),
        NamespaceId=namespace_id,
        HealthCheckCustomConfig=HealthCheckCustomConfig(FailureThreshold=1.0),
        DnsConfig=DnsConfig(
            RoutingPolicy="MULTIVALUE",
            NamespaceId=Ref(AWS_NO_VALUE),
            DnsRecords=[
                DnsRecord(TTL="15", Type="A"),
                DnsRecord(TTL="15", Type="SRV"),
            ],
        ),
        Name=family.family_hostname,
    )
    service_registry = ServiceRegistry(
        f"ServiceRegistry{port_config['target']}",
        RegistryArn=GetAtt(sd_service, "Arn"),
        Port=int(port_config["target"]),
    )
    add_resource(family.template, sd_service)
    family.ecs_service.registries.append(service_registry)
Esempio n. 6
0
    def add_envoy_container_definition(self, family):
        """
        Method to expand the containers configuration and add the Envoy SideCar.

        Attaches an App Mesh ProxyConfiguration to the family's task
        definition and registers the envoy container as a managed sidecar.

        :param family: the service family receiving the sidecar
        """
        # ECS App Mesh proxy configuration: application traffic is redirected
        # through envoy's ingress (15000) and egress (15001) listeners.
        proxy_config = ProxyConfiguration(
            ContainerName="envoy",
            Type="APPMESH",
            ProxyConfigurationProperties=[
                Environment(Name="IgnoredUID", Value="1337"),
                Environment(
                    Name="ProxyIngressPort",
                    Value="15000",
                ),
                Environment(Name="ProxyEgressPort", Value="15001"),
                Environment(Name="IgnoredGID", Value=""),
                # Credentials/metadata endpoints bypass the proxy.
                Environment(
                    Name="EgressIgnoredIPs",
                    Value="169.254.170.2,169.254.169.254",
                ),
                Environment(Name="EgressIgnoredPorts", Value=""),
                # All ports exposed by this family's containers are proxied.
                Environment(
                    Name="AppPorts",
                    Value=",".join(
                        [f"{port.Port}" for port in self.port_mappings]),
                ),
            ],
        )

        # Sidecar declared as a docker-compose style mapping.
        # NOTE(review): "user" appears redacted in this copy ("******");
        # IgnoredUID above is 1337, so the container user is presumably
        # 1337 as well — confirm before reuse.
        envoy_service = ManagedSidecar(
            "envoy",
            {
                "image":
                "public.ecr.aws/appmesh/aws-appmesh-envoy:v1.21.1.1-prod",
                "user":
                "******",
                "deploy": {
                    "resources": {
                        "limits": {
                            "cpus": 0.125,
                            "memory": "256MB"
                        }
                    }
                },
                "environment": {
                    "ENABLE_ENVOY_XRAY_TRACING": 0,
                },
                "ports": [
                    {
                        "target": 15000,
                        "published": 15000,
                        "protocol": "tcp"
                    },
                    {
                        "target": 15001,
                        "published": 15001,
                        "protocol": "tcp"
                    },
                ],
                # Healthy once envoy's admin endpoint (9901) reports LIVE.
                "healthcheck": {
                    "test": [
                        "CMD-SHELL",
                        "curl -s http://localhost:9901/server_info | grep state | grep -q LIVE",
                    ],
                    "interval":
                    "5s",
                    "timeout":
                    "2s",
                    "retries":
                    3,
                    "start_period":
                    10,
                },
                "ulimits": {
                    "nofile": {
                        "soft": 15000,
                        "hard": 15000
                    }
                },
                # IAM permissions envoy needs: stream mesh configuration and
                # resolve CloudMap service-discovery instances.
                "x-iam": {
                    "Policies": [{
                        "PolicyName": "AppMeshAccess",
                        "PolicyDocument": {
                            "Version":
                            "2012-10-17",
                            "Statement": [
                                {
                                    "Sid": "AppMeshAccess",
                                    "Effect": "Allow",
                                    "Action":
                                    ["appmesh:StreamAggregatedResources"],
                                    "Resource": ["*"],
                                },
                                {
                                    "Sid":
                                    "ServiceDiscoveryAccess",
                                    "Effect":
                                    "Allow",
                                    "Action": [
                                        "servicediscovery:Get*",
                                        "servicediscovery:Describe*",
                                        "servicediscovery:List*",
                                        "servicediscovery:DiscoverInstances*",
                                    ],
                                    "Resource":
                                    "*",
                                },
                            ],
                        },
                    }]
                },
            },
        )
        # The f-string interpolates resource titles at template-build time;
        # the surviving ${...} placeholders are CloudFormation Sub references.
        envoy_service.container_definition.Environment.append(
            Environment(
                Name="APPMESH_VIRTUAL_NODE_NAME",
                Value=Sub(
                    f"mesh/${{{appmesh_params.MESH_NAME.title}}}/virtualNode/${{{self.node.title}.VirtualNodeName}}"
                ),
            ))
        family.add_managed_sidecar(envoy_service)
        setattr(family.task_definition, "ProxyConfiguration", proxy_config)
Esempio n. 7
0
def _subdomain_for_jenkins():
    """Return the Sub expression for the Jenkins subdomain record."""
    hostname = 'jenkins.${subdomain}'
    return Sub(hostname)
Esempio n. 8
0
    def _add_service(self, service_name, config):
        """Create and register one ECS service.

        Builds the container definition (environment from the env sample
        file), a task role assumable by ECS tasks, the task definition, the
        Service itself (fronted by an ALB when 'http_interface' is present
        in *config*), the CloudFormation outputs and the service alarms.

        :param service_name: logical prefix used for every created resource
        :param config: dict with 'memory_reservation', 'command' and an
            optional 'http_interface' sub-dict
        """
        env_config = build_config(
            self.env,
            self.application_name,
            self.env_sample_file_path
        )
        container_definition_arguments = {
            "Environment": [
                Environment(Name=k, Value=v) for (k, v) in env_config
            ],
            "Name": service_name + "Container",
            "Image": self.ecr_image_uri + ':' + self.current_version,
            "Essential": 'true',
            "LogConfiguration": self._gen_log_config(service_name),
            "MemoryReservation": int(config['memory_reservation']),
            "Cpu": 0
        }

        # HTTP services expose their container port for the load balancer.
        if 'http_interface' in config:
            container_definition_arguments['PortMappings'] = [
                PortMapping(
                    ContainerPort=int(
                        config['http_interface']['container_port']
                    )
                )
            ]

        if config['command'] is not None:
            container_definition_arguments['Command'] = [config['command']]

        cd = ContainerDefinition(**container_definition_arguments)

        # Task role assumable by the ECS tasks service principal.
        task_role = self.template.add_resource(Role(
            service_name + "Role",
            AssumeRolePolicyDocument=PolicyDocument(
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[AssumeRole],
                        Principal=Principal("Service", ["ecs-tasks.amazonaws.com"])
                    )
                ]
            )
        ))

        td = TaskDefinition(
            service_name + "TaskDefinition",
            Family=service_name + "Family",
            ContainerDefinitions=[cd],
            TaskRoleArn=Ref(task_role)
        )
        self.template.add_resource(td)
        desired_count = self._get_desired_task_count_for_service(service_name)
        deployment_configuration = DeploymentConfiguration(
            MinimumHealthyPercent=100,
            MaximumPercent=200
        )
        if 'http_interface' in config:
            alb, lb, service_listener = self._add_alb(cd, service_name, config)
            svc = Service(
                service_name,
                LoadBalancers=[lb],
                Cluster=self.cluster_name,
                Role=Ref(self.ecs_service_role),
                TaskDefinition=Ref(td),
                DesiredCount=desired_count,
                # Fix: deployment_configuration was built unconditionally but
                # only applied on the non-HTTP path. 100/200 match the ECS
                # service defaults, so deployments render the same; the intent
                # is now explicit on both paths.
                DeploymentConfiguration=deployment_configuration,
                DependsOn=service_listener.title,
                PlacementStrategies=self.PLACEMENT_STRATEGIES
            )
            self.template.add_output(
                Output(
                    service_name + 'EcsServiceName',
                    Description='The ECS name which needs to be entered',
                    Value=GetAtt(svc, 'Name')
                )
            )
            self.template.add_output(
                Output(
                    service_name + "URL",
                    Description="The URL at which the service is accessible",
                    Value=Sub("https://${" + alb.name + ".DNSName}")
                )
            )
            self.template.add_resource(svc)
        else:
            svc = Service(
                service_name,
                Cluster=self.cluster_name,
                TaskDefinition=Ref(td),
                DesiredCount=desired_count,
                DeploymentConfiguration=deployment_configuration,
                PlacementStrategies=self.PLACEMENT_STRATEGIES
            )
            self.template.add_output(
                Output(
                    service_name + 'EcsServiceName',
                    Description='The ECS name which needs to be entered',
                    Value=GetAtt(svc, 'Name')
                )
            )
            self.template.add_resource(svc)
        self._add_service_alarms(svc)
Esempio n. 9
0
 def add_ecs_task(self):
     '''
     Add the Anchore Engine + Anchore DB ECS task definition to the template.

     Returns the updated CloudFormation template object.
     '''
     # Fix: replaced int('512') / bool('true') literals-via-conversion with
     # plain literals. bool('true') happened to be True — but so is
     # bool('false') (any non-empty string is truthy), so the pattern is a
     # correctness trap as well as noise.
     self.cfn_template.add_resource(
         TaskDefinition(
             title=constants.TASK,
             Volumes=[Volume(Name='anchore_db_vol')],
             TaskRoleArn=GetAtt(constants.TASK_ROLE, 'Arn'),
             ContainerDefinitions=[
                 ContainerDefinition(
                     Name='anchore-engine',
                     Hostname='anchore-engine',
                     Cpu=512,
                     MemoryReservation=1536,
                     Essential=True,
                     Image=ImportValue(
                         Sub('${Environment}-${AnchoreEngineImage}')),
                     PortMappings=[
                         PortMapping(
                             ContainerPort=8228,
                             HostPort=8228,
                             Protocol='tcp',
                         ),
                         PortMapping(
                             ContainerPort=8338,
                             HostPort=8338,
                             Protocol='tcp',
                         ),
                     ],
                     DockerSecurityOptions=['apparmor:docker-default'],
                     Environment=[
                         Environment(Name='ANCHORE_HOST_ID',
                                     Value='anchore-engine'),
                         Environment(Name='ANCHORE_ENDPOINT_HOSTNAME',
                                     Value='anchore-engine'),
                         Environment(Name='ANCHORE_DB_HOST',
                                     Value='anchore-db'),
                         Environment(Name='ANCHORE_DB_PASSWORD',
                                     Value=Ref('AnchoreDBPassword')),
                         Environment(Name='AWS_DEFAULT_REGION',
                                     Value=Ref('AWS::Region')),
                         Environment(Name='region',
                                     Value=Ref('AWS::Region')),
                     ],
                     LogConfiguration=LogConfiguration(
                         LogDriver='awslogs',
                         Options={
                             "awslogs-group":
                             Ref('EngineLogGroup'),
                             "awslogs-region":
                             Ref('AWS::Region'),
                             # NOTE(review): Join('') concatenates with no
                             # separator -> 'anchore-enginelogs'; confirm a
                             # '-' separator was not intended.
                             "awslogs-stream-prefix":
                             Join('', ['anchore-engine', 'logs'])
                         }),
                     Links=['anchore-db']),
                 ContainerDefinition(
                     Name='anchore-db',
                     Hostname='anchore-db',
                     Cpu=512,
                     MemoryReservation=1536,
                     Essential=True,
                     Image=Ref('ArchoreDatabaseImage'),
                     PortMappings=[
                         PortMapping(
                             ContainerPort=5432,
                             HostPort=5432,
                             Protocol='tcp',
                         )
                     ],
                     DockerSecurityOptions=['apparmor:docker-default'],
                     MountPoints=[
                         MountPoint(ContainerPath=Ref('PGDATA'),
                                    SourceVolume='anchore_db_vol')
                     ],
                     Environment=[
                         Environment(Name='POSTGRES_PASSWORD',
                                     Value=Ref('AnchoreDBPassword')),
                         Environment(Name='PGDATA', Value=Ref('PGDATA')),
                         Environment(Name='AWS_DEFAULT_REGION',
                                     Value=Ref('AWS::Region')),
                         Environment(Name='region',
                                     Value=Ref('AWS::Region')),
                     ],
                     LogConfiguration=LogConfiguration(
                         LogDriver='awslogs',
                         Options={
                             "awslogs-group":
                             Ref('DatabaseLogGroup'),
                             "awslogs-region":
                             Ref('AWS::Region'),
                             "awslogs-stream-prefix":
                             Join('', ['anchore-db', 'logs'])
                         }))
             ]))
     return self.cfn_template
Esempio n. 10
0
def _subdomain_for_application():
    """Return the Sub expression for the application's subdomain record."""
    expression = '${subdomain}'
    return Sub(expression)
Esempio n. 11
0
    def __init__(
        self,
        region: str,
        cidr_block: str,
        name: str = "VPC",
        internet_access_enabled: bool = True,
        internal_networks: list = None,
    ):
        """Create VPC, internet gateway, route tables and network ACLs

        Args:
            region (str): Region to use when setting up the VPC. The
                maximum number of subnets set up depends on the number
                of availability zones present in the region.
            cidr_block (str): IP range used by the VPC
            name (str, optional): VPC name. Defaults to "VPC".
            internet_access_enabled (bool, optional): If False, internet
                gateway will not be set up. Public network ACLs and
                route tables will still be created.
                Defaults to True.
            internal_networks (list, optional): IP ranges for private
                networks that this VPC will be connected to. They will
                be added to network ACLs. Defaults to None, which is
                treated as an empty list.
        """
        self.name = name
        self.region = region
        self.cidr_block = cidr_block
        # Fix: the parameter previously used a mutable default ([]), which is
        # shared across every instantiation; default to None and normalise.
        self.internal_networks = ([] if internal_networks is None
                                  else internal_networks)
        self.internet_access_enabled = internet_access_enabled
        self.public_subnets = []
        # Gateway subnets are public subnets hosting exit points like
        # NAT Gateway and VPC Endpoint interfaces
        self.gateway_subnets = []
        self.public_route_table = None
        self.natted_route_tables = []
        self.nat_gateways = []
        self._t = Template()  # Template
        self._r = dict()  # Resources
        self._o = dict()  # Outputs
        self._r["Vpc"] = t_ec2.VPC(
            title=f"{self.name}Vpc",
            CidrBlock=self.cidr_block,
            EnableDnsHostnames=True,
            EnableDnsSupport=True,
            Tags=[{"Key": "Name", "Value": self.name}],
        )
        self.vpc = self._r["Vpc"]
        self._o["VpcId"] = Output(
            title="VpcId",
            Value=Ref(self.vpc),
            Export=Export(Sub("${AWS::StackName}-vpc-id")),
        )
        if internet_access_enabled:
            # Create Internet Gateway
            title = "Igw"
            self._r[title] = t_ec2.InternetGateway(
                title=title,
                Tags=[{"Key": "Name", "Value": f"{self.name}-igw"}],
            )
            self._r["igw_attachment"] = t_ec2.VPCGatewayAttachment(
                title="IgwAttachment",
                VpcId=Ref(self.vpc),
                InternetGatewayId=Ref(self._r["Igw"]),
            )
        # Public routing table
        self._r["PubRouteTable"] = t_ec2.RouteTable(
            title="PubRouteTable",
            VpcId=Ref(self.vpc),
            Tags=[{"Key": "Name", "Value": "Public"}],
        )
        self.public_route_table = self._r["PubRouteTable"]
        if internet_access_enabled:
            self._r["pub_rtt_rt_pub"] = t_ec2.Route(
                title="PubRoute",
                RouteTableId=Ref(self._r["PubRouteTable"]),
                DestinationCidrBlock="0.0.0.0/0",
                GatewayId=Ref(self._r["Igw"]),
            )
        # Network ACL for public subnets.
        # Rule numbering scheme: 99 ICMP, 100 intra-VPC, 101+ internal
        # networks, 2xx inbound services, 5xx ephemeral/return traffic.
        self._r["PubNacl"] = t_ec2.NetworkAcl(
            title="PubNacl",
            VpcId=Ref(self.vpc),
            Tags=[{"Key": "Name", "Value": "Public"}],
        )
        self.public_nacl = self._r["PubNacl"]
        self._r["pub_nacl_out_all"] = t_ec2.NetworkAclEntry(
            title="PubNaclOutAll",
            NetworkAclId=Ref(self.public_nacl),
            Egress=True,
            RuleNumber=500,
            CidrBlock="0.0.0.0/0",
            Protocol=-1,
            RuleAction="allow",
        )
        self._r["pub_nacl_in_icmp"] = t_ec2.NetworkAclEntry(
            title="PubNaclInIcmp",
            NetworkAclId=Ref(self.public_nacl),
            Egress=False,
            RuleNumber=99,
            CidrBlock="0.0.0.0/0",
            Protocol=1,
            Icmp=t_ec2.ICMP(Code=-1, Type=-1),
            RuleAction="allow",
        )
        self._r["pub_nacl_in_vpc"] = t_ec2.NetworkAclEntry(
            title="PubNaclInVpc",
            NetworkAclId=Ref(self.public_nacl),
            Egress=False,
            RuleNumber=100,
            CidrBlock=GetAtt(self.vpc, "CidrBlock"),
            Protocol=-1,
            RuleAction="allow",
        )
        # Fix: the loop variable was named cidr_block, shadowing the
        # constructor parameter; renamed to keep the parameter intact.
        for index, internal_cidr in enumerate(self.internal_networks):
            self._r[f"pub_nacl_in_internal_{index}"] = t_ec2.NetworkAclEntry(
                title=f"PubNaclInInternal{index}",
                NetworkAclId=Ref(self.public_nacl),
                Egress=False,
                RuleNumber=101 + index,
                CidrBlock=internal_cidr,
                Protocol=-1,
                RuleAction="allow",
            )
        self._r["pub_nacl_in_ssh"] = t_ec2.NetworkAclEntry(
            title="PubNaclInSsh",
            NetworkAclId=Ref(self.public_nacl),
            Egress=False,
            RuleNumber=210,
            CidrBlock="0.0.0.0/0",
            Protocol=6,
            PortRange=t_ec2.PortRange(From=22, To=22),
            RuleAction="allow",
        )
        self._r["pub_nacl_in_http"] = t_ec2.NetworkAclEntry(
            title="PubNaclInHttp",
            NetworkAclId=Ref(self.public_nacl),
            Egress=False,
            RuleNumber=220,
            CidrBlock="0.0.0.0/0",
            Protocol=6,
            PortRange=t_ec2.PortRange(From=80, To=80),
            RuleAction="allow",
        )
        self._r["pub_nacl_in_https"] = t_ec2.NetworkAclEntry(
            title="PubNaclInHttps",
            NetworkAclId=Ref(self.public_nacl),
            Egress=False,
            RuleNumber=221,
            CidrBlock="0.0.0.0/0",
            Protocol=6,
            PortRange=t_ec2.PortRange(From=443, To=443),
            RuleAction="allow",
        )
        # Ephemeral port ranges for return traffic (TCP then UDP).
        self._r["pub_nacl_in_nat_tcp"] = t_ec2.NetworkAclEntry(
            title="PubNaclInNatTcp",
            NetworkAclId=Ref(self.public_nacl),
            Egress=False,
            RuleNumber=500,
            CidrBlock="0.0.0.0/0",
            Protocol=6,
            PortRange=t_ec2.PortRange(From=1024, To=65535),
            RuleAction="allow",
        )
        self._r["pub_nacl_in_nat_udp"] = t_ec2.NetworkAclEntry(
            title="PubNaclInNatUdp",
            NetworkAclId=Ref(self.public_nacl),
            Egress=False,
            RuleNumber=501,
            CidrBlock="0.0.0.0/0",
            Protocol=17,
            PortRange=t_ec2.PortRange(From=1024, To=65535),
            RuleAction="allow",
        )
        # Network ACL for private subnets (same scheme, no SSH/HTTP(S) rules).
        self._r["InternalNacl"] = t_ec2.NetworkAcl(
            title="InternalNacl",
            VpcId=Ref(self.vpc),
            Tags=[{"Key": "Name", "Value": "Private"}],
        )
        self.internal_nacl = self._r["InternalNacl"]
        self._r["internal_nacl_out_all"] = t_ec2.NetworkAclEntry(
            title="InternalNaclOutAll",
            NetworkAclId=Ref(self.internal_nacl),
            Egress=True,
            RuleNumber=500,
            CidrBlock="0.0.0.0/0",
            Protocol=-1,
            RuleAction="allow",
        )
        self._r["internal_nacl_in_icmp"] = t_ec2.NetworkAclEntry(
            title="InternalNaclInIcmp",
            NetworkAclId=Ref(self.internal_nacl),
            Egress=False,
            RuleNumber=99,
            CidrBlock="0.0.0.0/0",
            Protocol=1,
            Icmp=t_ec2.ICMP(Code=-1, Type=-1),
            RuleAction="allow",
        )
        self._r["internal_nacl_in_vpc"] = t_ec2.NetworkAclEntry(
            title="InternalNaclInVpc",
            NetworkAclId=Ref(self.internal_nacl),
            Egress=False,
            RuleNumber=100,
            CidrBlock=GetAtt(self.vpc, "CidrBlock"),
            Protocol=-1,
            RuleAction="allow",
        )
        for index, internal_cidr in enumerate(self.internal_networks):
            self._r[
                f"internal_nacl_in_internal_{index}"] = t_ec2.NetworkAclEntry(
                    title=f"InternalNaclInInternal{index}",
                    NetworkAclId=Ref(self.internal_nacl),
                    Egress=False,
                    RuleNumber=101 + index,
                    CidrBlock=internal_cidr,
                    Protocol=-1,
                    RuleAction="allow",
                )
        self._r["internal_nacl_in_nat_tcp"] = t_ec2.NetworkAclEntry(
            title="InternalNaclInNatTcp",
            NetworkAclId=Ref(self.internal_nacl),
            Egress=False,
            RuleNumber=500,
            CidrBlock="0.0.0.0/0",
            Protocol=6,
            PortRange=t_ec2.PortRange(From=1024, To=65535),
            RuleAction="allow",
        )
        self._r["internal_nacl_in_nat_udp"] = t_ec2.NetworkAclEntry(
            title="InternalNaclInNatUdp",
            NetworkAclId=Ref(self.internal_nacl),
            Egress=False,
            RuleNumber=501,
            CidrBlock="0.0.0.0/0",
            Protocol=17,
            PortRange=t_ec2.PortRange(From=1024, To=65535),
            RuleAction="allow",
        )
Esempio n. 12
0
                  'oracle-ee',
                  'oracle-se2',
                  'oracle-se1',
                  'oracle-se',
                  'postgres',
                  'sqlserver-ee',
                  'sqlserver-se',
                  'sqlserver-ex',
                  'sqlserver-web',
              ]))
# endregion

# region RDS
sec_group = template.add_resource(
    ec2.SecurityGroup('SecurityGroup',
                      GroupDescription=Sub('Default rules'),
                      SecurityGroupIngress=[
                          ec2.SecurityGroupRule(
                              CidrIp=Ref(allow_cidr),
                              FromPort=ec2.network_port(5432),
                              IpProtocol='tcp',
                              ToPort=ec2.network_port(5432),
                          )
                      ],
                      SecurityGroupEgress=[
                          ec2.SecurityGroupRule(
                              CidrIp=Ref(allow_cidr),
                              FromPort=ec2.network_port(5432),
                              IpProtocol='tcp',
                              ToPort=ec2.network_port(5432),
                          )
Esempio n. 13
0
    DeliverLogsPermissionArn=GetAtt(log_delivery_role, 'Arn'),
    LogGroupName=Ref(log_group),
    ResourceId=Ref(param_resource_id),
    ResourceType=Ref(param_resource_type),
    TrafficType=Ref(param_traffic_type),
))

#
# Output
#
# Export the log group name/ARN for cross-stack Fn::ImportValue consumers;
# the flow log ID is output without an export.
t.add_output([
    Output(
        'LogGroupName',
        Description='Name of the log group.',
        Value=Ref(log_group),
        Export=Export(Sub('${AWS::StackName}-LogGroupName'))
    ),
    Output(
        'LogGroupArn',
        Description='Arn of the log group.',
        Value=GetAtt(log_group, 'Arn'),
        Export=Export(Sub('${AWS::StackName}-LogGroupArn'))
    ),
    Output(
        'FlowLog',
        Description='Flow log ID',
        Value=Ref(vpc_flow_log),
    )
])
#
# Write template
#!/usr/bin/env python3

from troposphere import codecommit, GetAtt, Sub
from troposphere import Template, Output

# NOTE(review): the multi-line string is passed as Template's first
# positional argument (presumably the template Description — confirm
# against the troposphere version in use). The author e-mail was redacted
# upstream.
template = Template("""
Manage a Git repository with CodeCommit.

Template: git-template
Author: Carlos Avila <*****@*****.**>
""")

# region Resources
# region Resources
# A single repository named after the stack.
repo_name = Sub('${AWS::StackName}')
repository = template.add_resource(
    codecommit.Repository('Repository', RepositoryName=repo_name))
# endregion

# region Outputs
# One output per clone URL, titled after the attribute it exposes.
for clone_attr in ('CloneUrlHttp', 'CloneUrlSsh'):
    template.add_output(
        Output(clone_attr, Value=GetAtt(repository, clone_attr)))
# endregion

if __name__ == '__main__':
    print(template.to_json())
Esempio n. 15
0
        't3.small', 't3.medium',
        't3.large', 't3.xlarge',
    ],
))
t.set_parameter_label(instance_type, 'Instance Type')

###########################################
#             Security Group
###########################################

# Security group for the Minecraft EC2 instance.
ec2_security_group = t.add_resource(SecurityGroup(
    'Ec2SecurityGroup',
    VpcId=Ref(vpc),
    GroupDescription='Security group for Minecraft EC2 instance.',
    Tags=Tags(
        # BUG FIX: the original used Sub('$minecraft-sg'), which contains no
        # ${...} placeholder, so Fn::Sub would emit the literal tag value
        # "$minecraft-sg".  Scope the name to the stack instead.
        Name=Sub('${AWS::StackName}-minecraft-sg'),
    )
))

# Open the Minecraft server port (TCP 25565) to the world (0.0.0.0/0).
ec2_minecraft_tcp_ingress = t.add_resource(SecurityGroupIngress(
    'Ec2MinecraftTcpIngress',
    DependsOn='Ec2SecurityGroup',
    GroupId=Ref(ec2_security_group),
    IpProtocol='tcp',
    FromPort='25565',
    ToPort='25565',
    CidrIp=c.QUAD_ZERO,
))

ec2_minecraft_udp_ingress = t.add_resource(SecurityGroupIngress(
    'Ec2MinecraftUdpIngress',
           sns.Subscription(
               Endpoint=Select(5, Ref(param_alarm_emails)),
               Protocol='email'
           ),
           Ref(AWS_NO_VALUE)),

    ],
))

#
# Output
#
# Export both the topic ARN and its name for cross-stack consumption.
topic_outputs = [
    Output(
        'TopicArn',
        Description='Topic arn',
        Value=Ref(alarm_topic),
        Export=Export(Sub('${AWS::StackName}-TopicArn')),
    ),
    Output(
        'TopicName',
        Description='Topic name',
        Value=GetAtt(alarm_topic, 'TopicName'),
        Export=Export(Sub('${AWS::StackName}-TopicName')),
    ),
]
t.add_output(topic_outputs)

#
# Write
#
# Render the template as YAML next to this source file
# (FooTemplate.py -> Foo.template.yaml).
_output_path = __file__.replace('Template.py', '.template.yaml')
cfnutil.write(t, _output_path, write_yaml=True)
Esempio n. 17
0
    Type="String",
    AllowedPattern=utils.get_email_pattern()
))

# Admin user name: must start with a letter, then letters/digits only.
admin_username = t.add_parameter(Parameter(
    "HyP3AdminUsername",
    Type="String",
    Description="Username for the admin hyp3 user",
    AllowedPattern="[a-zA-Z][a-zA-Z0-9]*",
))

ssm_hyp3_api_username_param_name = "HyP3ApiUsername"
# SSM parameter stored at /<stack-name>/HyP3ApiUsername.  The "{{"/"}}"
# escapes survive str.format so ${StackName} is substituted by Fn::Sub.
# NOTE(review): the "♥" value looks like a placeholder overwritten out of
# band -- confirm.
_username_path = "/${{StackName}}/{}".format(ssm_hyp3_api_username_param_name)
ssm_hyp3_api_username = t.add_resource(SSMParameter(
    "HyP3SSMParameterHyP3ApiUsername",
    Name=Sub(_username_path, StackName=Ref("AWS::StackName")),
    Type="String",
    Value="♥",
))

ssm_hyp3_api_key_param_name = "HyP3ApiKey"
# SSM parameter stored at /<stack-name>/HyP3ApiKey, mirroring the username
# parameter above.  NOTE(review): "♥" appears to be a placeholder value --
# confirm it is replaced after deployment.
_api_key_path = "/${{StackName}}/{}".format(ssm_hyp3_api_key_param_name)
ssm_hyp3_api_key = t.add_resource(SSMParameter(
    "HyP3SSMParameterHyP3ApiKey",
    Name=Sub(_api_key_path, StackName=Ref("AWS::StackName")),
    Type="String",
    Value="♥",
))
Esempio n. 18
0
    def create_template(self):
        """Create template (main function called by Stacker).

        Builds an EKS worker-node stack: the shared node security group and
        its control-plane ingress/egress rules, a launch configuration whose
        user-data bootstraps the node into the cluster, and the worker
        auto-scaling group.
        """
        template = self.template
        variables = self.get_variables()
        template.add_version('2010-09-09')
        template.add_description('Kubernetes workers via EKS - V1.0.0 '
                                 '- compatible with amazon-eks-node-v23+')

        # Metadata
        # Groups/orders the stack parameters in the CloudFormation console.
        template.add_metadata({
            'AWS::CloudFormation::Interface': {
                'ParameterGroups': [
                    {'Label': {'default': 'EKS Cluster'},
                     'Parameters': [variables[i].name
                                    for i
                                    in ['ClusterName',
                                        'ClusterControlPlaneSecurityGroup']]},
                    {'Label': {'default': 'Worker Node Configuration'},
                     'Parameters': [variables[i].name
                                    for i
                                    in ['NodeGroupName',
                                        'NodeAutoScalingGroupMinSize',
                                        'NodeAutoScalingGroupMaxSize',
                                        'UseDesiredInstanceCount',
                                        'NodeInstanceType',
                                        'NodeInstanceProfile',
                                        'NodeImageId',
                                        'NodeVolumeSize',
                                        'KeyName',
                                        'UseSpotInstances',
                                        'SpotBidPrice',
                                        'BootstrapArguments']]},
                    {'Label': {'default': 'Worker Network Configuration'},
                     'Parameters': [variables[i].name
                                    for i
                                    in ['VpcId', 'Subnets']]}
                ]
            }
        })

        # Conditions
        # SetSpotPrice: bid for spot capacity only when requested.
        template.add_condition(
            'SetSpotPrice',
            Equals(variables['UseSpotInstances'].ref, 'yes')
        )
        # DesiredInstanceCountSpecified: pin the ASG desired capacity.
        template.add_condition(
            'DesiredInstanceCountSpecified',
            Equals(variables['UseDesiredInstanceCount'].ref, 'true')
        )
        # KeyNameSpecified: attach an SSH key pair only if one was given.
        template.add_condition(
            'KeyNameSpecified',
            Not(Equals(variables['KeyName'].ref, ''))
        )

        # Resources
        # Shared security group for every worker node.  The
        # kubernetes.io/cluster/<name>=owned tag follows the EKS convention
        # for marking resources the cluster owns.
        nodesecuritygroup = template.add_resource(
            ec2.SecurityGroup(
                'NodeSecurityGroup',
                GroupDescription='Security group for all nodes in the cluster',
                Tags=[
                    {'Key': Sub('kubernetes.io/cluster/${ClusterName}'),
                     'Value': 'owned'},
                ],
                VpcId=variables['VpcId'].ref
            )
        )
        template.add_output(
            Output(
                'NodeSecurityGroup',
                Description='Security group for all nodes in the cluster',
                Value=nodesecuritygroup.ref()
            )
        )
        # Unrestricted node-to-node traffic within the group.
        template.add_resource(
            ec2.SecurityGroupIngress(
                'NodeSecurityGroupIngress',
                Description='Allow node to communicate with each other',
                GroupId=nodesecuritygroup.ref(),
                SourceSecurityGroupId=nodesecuritygroup.ref(),
                IpProtocol='-1',
                FromPort=0,
                ToPort=65535
            )
        )
        # Control plane -> kubelet/pods on the ephemeral port range (1025+).
        template.add_resource(
            ec2.SecurityGroupIngress(
                'NodeSecurityGroupFromControlPlaneIngress',
                Description='Allow worker Kubelets and pods to receive '
                            'communication from the cluster control plane',
                GroupId=nodesecuritygroup.ref(),
                SourceSecurityGroupId=variables['ClusterControlPlaneSecurityGroup'].ref,  # noqa
                IpProtocol='tcp',
                FromPort=1025,
                ToPort=65535
            )
        )
        # Matching egress rule on the control-plane security group.
        template.add_resource(
            ec2.SecurityGroupEgress(
                'ControlPlaneEgressToNodeSecurityGroup',
                Description='Allow the cluster control plane to communicate '
                            'with worker Kubelet and pods',
                GroupId=variables['ClusterControlPlaneSecurityGroup'].ref,
                DestinationSecurityGroupId=nodesecuritygroup.ref(),
                IpProtocol='tcp',
                FromPort=1025,
                ToPort=65535
            )
        )
        # Control plane -> nodes on 443 (extension API servers).
        template.add_resource(
            ec2.SecurityGroupIngress(
                'NodeSecurityGroupFromControlPlaneOn443Ingress',
                Description='Allow pods running extension API servers on port '
                            '443 to receive communication from cluster '
                            'control plane',
                GroupId=nodesecuritygroup.ref(),
                SourceSecurityGroupId=variables['ClusterControlPlaneSecurityGroup'].ref,  # noqa
                IpProtocol='tcp',
                FromPort=443,
                ToPort=443
            )
        )
        template.add_resource(
            ec2.SecurityGroupEgress(
                'ControlPlaneEgressToNodeSecurityGroupOn443',
                Description='Allow the cluster control plane to communicate '
                            'with pods running extension API servers on port '
                            '443',
                GroupId=variables['ClusterControlPlaneSecurityGroup'].ref,
                DestinationSecurityGroupId=nodesecuritygroup.ref(),
                IpProtocol='tcp',
                FromPort=443,
                ToPort=443
            )
        )
        # Nodes -> cluster API server on 443.
        template.add_resource(
            ec2.SecurityGroupIngress(
                'ClusterControlPlaneSecurityGroupIngress',
                Description='Allow pods to communicate with the cluster API '
                            'Server',
                GroupId=variables['ClusterControlPlaneSecurityGroup'].ref,
                SourceSecurityGroupId=nodesecuritygroup.ref(),
                IpProtocol='tcp',
                FromPort=443,
                ToPort=443
            )
        )

        # Launch configuration: SSH key and spot price are attached only when
        # their conditions hold; user-data runs the EKS bootstrap script and
        # then signals the NodeGroup resource via cfn-signal.
        nodelaunchconfig = template.add_resource(
            autoscaling.LaunchConfiguration(
                'NodeLaunchConfig',
                AssociatePublicIpAddress=True,
                IamInstanceProfile=variables['NodeInstanceProfile'].ref,
                ImageId=variables['NodeImageId'].ref,
                InstanceType=variables['NodeInstanceType'].ref,
                KeyName=If(
                    'KeyNameSpecified',
                    variables['KeyName'].ref,
                    NoValue
                ),
                SecurityGroups=[nodesecuritygroup.ref()],
                SpotPrice=If('SetSpotPrice',
                             variables['SpotBidPrice'].ref,
                             NoValue),
                BlockDeviceMappings=[autoscaling.BlockDeviceMapping(
                    DeviceName='/dev/xvda',
                    Ebs=autoscaling.EBSBlockDevice(
                        VolumeSize=variables['NodeVolumeSize'].ref,
                        VolumeType='gp2',
                        DeleteOnTermination=True
                    )
                )],
                UserData=Base64(
                    Sub('\n'.join([
                        '#!/bin/bash',
                        'set -o xtrace',
                        '/etc/eks/bootstrap.sh ${ClusterName} ${BootstrapArguments}',  # noqa
                        '/opt/aws/bin/cfn-signal --exit-code $? \\',
                        '--stack ${AWS::StackName} \\',
                        '--resource NodeGroup \\',
                        '--region ${AWS::Region}'
                    ]))
                )
            )
        )

        # Worker auto-scaling group with a rolling update policy.
        # NOTE(review): when DesiredInstanceCountSpecified holds, the desired
        # capacity is pinned to the *max* size -- confirm this is intentional.
        template.add_resource(
            autoscaling.AutoScalingGroup(
                'NodeGroup',
                DesiredCapacity=If(
                    'DesiredInstanceCountSpecified',
                    variables['NodeAutoScalingGroupMaxSize'].ref,
                    NoValue
                ),
                LaunchConfigurationName=nodelaunchconfig.ref(),
                MinSize=variables['NodeAutoScalingGroupMinSize'].ref,
                MaxSize=variables['NodeAutoScalingGroupMaxSize'].ref,
                VPCZoneIdentifier=variables['Subnets'].ref,
                Tags=[
                    autoscaling.Tag(
                        'Name',
                        Sub('${ClusterName}-${NodeGroupName}-Node'),
                        True),
                    autoscaling.Tag(
                        Sub('kubernetes.io/cluster/${ClusterName}'),
                        'owned',
                        True)
                ],
                UpdatePolicy=UpdatePolicy(
                    AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                        MinInstancesInService='1',
                        MaxBatchSize='1'
                    )
                )
            )
        )
Esempio n. 19
0
    def _add_service(self, service_name, config):
        """Create the task definition and ECS service for one service.

        Supports both the EC2 and Fargate launch types.  When the service
        declares an ``http_interface`` an ALB is wired in front of it via
        ``self._add_alb``; otherwise a headless service is created.  All
        resources and outputs are registered on ``self.template``.
        """
        launch_type = self.LAUNCH_TYPE_FARGATE if 'fargate' in config else self.LAUNCH_TYPE_EC2
        env_config = build_config(self.env, self.application_name,
                                  self.env_sample_file_path)
        container_definition_arguments = {
            "Environment":
            [Environment(Name=k, Value=v) for (k, v) in env_config],
            "Name": service_name + "Container",
            "Image": self.ecr_image_uri + ':' + self.current_version,
            "Essential": 'true',
            "LogConfiguration": self._gen_log_config(service_name),
            "MemoryReservation": int(config['memory_reservation']),
            "Cpu": 0
        }

        if 'http_interface' in config:
            container_definition_arguments['PortMappings'] = [
                PortMapping(ContainerPort=int(config['http_interface']
                                              ['container_port']))
            ]

        if config['command'] is not None:
            container_definition_arguments['Command'] = [config['command']]

        cd = ContainerDefinition(**container_definition_arguments)

        # Task role assumable by the ECS tasks service principal.
        task_role = self.template.add_resource(
            Role(service_name + "Role",
                 AssumeRolePolicyDocument=PolicyDocument(Statement=[
                     Statement(Effect=Allow,
                               Action=[AssumeRole],
                               Principal=Principal(
                                   "Service", ["ecs-tasks.amazonaws.com"]))
                 ])))

        # Fargate task definitions need explicit sizing, awsvpc networking
        # and an execution role for image pulls / log writes.
        launch_type_td = {}
        if launch_type == self.LAUNCH_TYPE_FARGATE:
            launch_type_td = {
                'RequiresCompatibilities': ['FARGATE'],
                'ExecutionRoleArn':
                boto3.resource('iam').Role('ecsTaskExecutionRole').arn,
                'NetworkMode':
                'awsvpc',
                'Cpu':
                str(config['fargate']['cpu']),
                'Memory':
                str(config['fargate']['memory'])
            }

        td = TaskDefinition(service_name + "TaskDefinition",
                            Family=service_name + "Family",
                            ContainerDefinitions=[cd],
                            TaskRoleArn=Ref(task_role),
                            **launch_type_td)

        self.template.add_resource(td)
        desired_count = self._get_desired_task_count_for_service(service_name)
        deployment_configuration = DeploymentConfiguration(
            MinimumHealthyPercent=100, MaximumPercent=200)
        if 'http_interface' in config:
            alb, lb, service_listener, alb_sg = self._add_alb(
                cd, service_name, config, launch_type)

            if launch_type == self.LAUNCH_TYPE_FARGATE:
                # if launch type is ec2, then services inherit the ec2 instance security group
                # otherwise, we need to specify a security group for the service
                service_security_group = SecurityGroup(
                    pascalcase("FargateService" + self.env + service_name),
                    GroupName=pascalcase("FargateService" + self.env +
                                         service_name),
                    SecurityGroupIngress=[{
                        'IpProtocol':
                        'TCP',
                        'SourceSecurityGroupId':
                        Ref(alb_sg),
                        'ToPort':
                        int(config['http_interface']['container_port']),
                        'FromPort':
                        int(config['http_interface']['container_port']),
                    }],
                    VpcId=Ref(self.vpc),
                    GroupDescription=pascalcase("FargateService" + self.env +
                                                service_name))
                self.template.add_resource(service_security_group)

                launch_type_svc = {
                    'NetworkConfiguration':
                    NetworkConfiguration(
                        AwsvpcConfiguration=AwsvpcConfiguration(
                            Subnets=[
                                Ref(self.private_subnet1),
                                Ref(self.private_subnet2)
                            ],
                            SecurityGroups=[Ref(service_security_group)]))
                }
            else:
                launch_type_svc = {
                    'Role': Ref(self.ecs_service_role),
                    'PlacementStrategies': self.PLACEMENT_STRATEGIES
                }
            svc = Service(
                service_name,
                LoadBalancers=[lb],
                Cluster=self.cluster_name,
                TaskDefinition=Ref(td),
                DesiredCount=desired_count,
                # BUG FIX: deployment_configuration is built unconditionally
                # above and applied in the non-HTTP branch below, but was
                # previously dropped here, leaving this service with default
                # ECS deployment settings instead of 100/200.
                DeploymentConfiguration=deployment_configuration,
                DependsOn=service_listener.title,
                LaunchType=launch_type,
                **launch_type_svc,
            )
            self.template.add_output(
                Output(service_name + 'EcsServiceName',
                       Description='The ECS name which needs to be entered',
                       Value=GetAtt(svc, 'Name')))
            self.template.add_output(
                Output(
                    service_name + "URL",
                    Description="The URL at which the service is accessible",
                    Value=Sub("https://${" + alb.name + ".DNSName}")))
            self.template.add_resource(svc)
        else:
            launch_type_svc = {}
            if launch_type == self.LAUNCH_TYPE_FARGATE:
                # if launch type is ec2, then services inherit the ec2 instance security group
                # otherwise, we need to specify a security group for the service
                service_security_group = SecurityGroup(
                    pascalcase("FargateService" + self.env + service_name),
                    GroupName=pascalcase("FargateService" + self.env +
                                         service_name),
                    SecurityGroupIngress=[],
                    VpcId=Ref(self.vpc),
                    GroupDescription=pascalcase("FargateService" + self.env +
                                                service_name))
                self.template.add_resource(service_security_group)
                launch_type_svc = {
                    'NetworkConfiguration':
                    NetworkConfiguration(
                        AwsvpcConfiguration=AwsvpcConfiguration(
                            Subnets=[
                                Ref(self.private_subnet1),
                                Ref(self.private_subnet2)
                            ],
                            SecurityGroups=[Ref(service_security_group)]))
                }
            else:
                launch_type_svc = {
                    'PlacementStrategies': self.PLACEMENT_STRATEGIES
                }
            svc = Service(service_name,
                          Cluster=self.cluster_name,
                          TaskDefinition=Ref(td),
                          DesiredCount=desired_count,
                          DeploymentConfiguration=deployment_configuration,
                          LaunchType=launch_type,
                          **launch_type_svc)
            self.template.add_output(
                Output(service_name + 'EcsServiceName',
                       Description='The ECS name which needs to be entered',
                       Value=GetAtt(svc, 'Name')))
            self.template.add_resource(svc)
        self._add_service_alarms(svc)
Esempio n. 20
0
def _subdomain_for_instance():
    """Return a Fn::Sub expression for the instance's "sys." subdomain.

    The ${subdomain} placeholder is resolved by CloudFormation at deploy
    time (presumably from a template parameter -- not visible here).
    """
    pattern = 'sys.${subdomain}'
    return Sub(pattern)
Esempio n. 21
0
def create_cloud_front_template():
    """Build and write a CloudFront-over-S3 template to ./cloudfront.yml.

    Creates an origin bucket, an origin access identity (OAI), a bucket
    policy granting the OAI read access, and a distribution that attaches
    a Lambda@Edge viewer-request function imported from another stack.
    """
    template = Template()
    template.set_transform('AWS::Serverless-2016-10-31')

    # Private S3 bucket that serves as the distribution origin; the name is
    # suffixed with the account ID to keep it globally unique.
    bucket = template.add_resource(
        resource=Bucket(
            title='SampleOriginBucket',
            BucketName=Sub('sample-origin-bucket-${AWS::AccountId}')
        )
    )

    # Origin access identity through which CloudFront reads the bucket.
    identity = template.add_resource(
        resource=CloudFrontOriginAccessIdentity(
            title='SampleOriginAccessIdentity',
            CloudFrontOriginAccessIdentityConfig=CloudFrontOriginAccessIdentityConfig(
                Comment='sample-lambda-edge'
            )
        )
    )

    # Allow only the OAI's canonical user to GetObject from the bucket.
    template.add_resource(
        resource=BucketPolicy(
            title='SampleBucketPolicy',
            Bucket=Ref(bucket),
            PolicyDocument={
                'Statement': [{
                    'Action': 's3:GetObject',
                    'Effect': 'Allow',
                    'Resource': Join(delimiter='/', values=[GetAtt(bucket, 'Arn'), '*']),
                    'Principal': {
                        'CanonicalUser': GetAtt(logicalName=identity, attrName='S3CanonicalUserId')
                    }
                }]
            }
        )
    )

    # Distribution: forwards query strings, redirects HTTP to HTTPS, and
    # runs the imported Lambda@Edge function on viewer-request.
    # NOTE(review): the function version is hard-coded to ":8" -- confirm
    # this stays in sync with the exporting stack.
    template.add_resource(
        resource=Distribution(
            title='SampleDistribution',
            DistributionConfig=DistributionConfig(
                DefaultCacheBehavior=DefaultCacheBehavior(
                    ForwardedValues=ForwardedValues(
                        QueryString=True,
                    ),
                    LambdaFunctionAssociations=[
                        LambdaFunctionAssociation(
                            EventType='viewer-request',
                            LambdaFunctionARN=Sub([
                                '${FUNCTION_ARN}:8', {'FUNCTION_ARN': ImportValue(get_export_name())}
                            ]),
                        )
                    ],
                    TargetOriginId=Sub('S3-${' + bucket.title + '}'),
                    ViewerProtocolPolicy='redirect-to-https',
                ),
                Enabled=True,
                Origins=[
                    Origin(
                        Id=Sub('S3-${' + bucket.title + '}'),
                        DomainName=Sub('${' + bucket.title + '}.s3.amazonaws.com'),
                        S3OriginConfig=S3OriginConfig(
                            OriginAccessIdentity=Sub('origin-access-identity/cloudfront/${' + identity.title + '}')
                        )
                    )
                ],
            )
        )
    )

    # Serialize the finished template to YAML on disk.
    with open('./cloudfront.yml', mode='w') as file:
        file.write(template.to_yaml())
Esempio n. 22
0
    def create_template(self):
        """Create template (main function called by Stacker)."""
        template = self.template
        variables = self.get_variables()
        template.add_version('2010-09-09')
        template.add_description('Kubernetes Master via EKS - V1.0.0')

        # Resources
        ccpsecuritygroup = template.add_resource(
            ec2.SecurityGroup(
                'ClusterControlPlaneSecurityGroup',
                GroupDescription='Cluster communication with worker nodes',
                Tags=[
                    {'Key': Sub('kubernetes.io/cluster/${EksClusterName}'),
                     'Value': 'owned'},
                    {'Key': 'Product',
                     'Value': 'Kubernetes'},
                    {'Key': 'Project',
                     'Value': 'eks'},
                    {'Key': 'Name',
                     'Value': Sub('${EksClusterName}-sg-worker-nodes')}
                ],
                VpcId=variables['VPC'].ref
            )
        )
        template.add_output(
            Output(
                ccpsecuritygroup.title,
                Description='Cluster communication with worker nodes',
                Export=Export(
                    Sub('${AWS::StackName}-ControlPlaneSecurityGroup')
                ),
                Value=ccpsecuritygroup.ref()
            )
        )

        eksservicerole = template.add_resource(
            iam.Role(
                'EksServiceRole',
                AssumeRolePolicyDocument=make_simple_assume_policy(
                    'eks.amazonaws.com'
                ),
                ManagedPolicyArns=[
                    IAM_POLICY_ARN_PREFIX + i for i in [
                        'AmazonEKSClusterPolicy',
                        'AmazonEKSServicePolicy'
                    ]
                ],
                Policies=[
                    iam.Policy(
                        PolicyName='EksServiceRolePolicy',
                        PolicyDocument=PolicyDocument(
                            Statement=[
                                Statement(
                                    Action=[awacs.iam.CreateServiceLinkedRole,
                                            awacs.iam.PutRolePolicy],
                                    Condition=Condition(
                                        StringLike(
                                            'iam:AWSServiceName',
                                            'elasticloadbalancing.amazonaws.com'  # noqa
                                        )
                                    ),
                                    Effect=Allow,
                                    Resource=[
                                        Sub('arn:aws:iam::${AWS::AccountId}:role/'  # noqa
                                            'aws-service-role/'
                                            'elasticloadbalancing.amazonaws.com/'  # noqa
                                            'AWSServiceRoleForElasticLoadBalancing*')  # noqa
                                    ]
                                )
                            ]
                        )
                    )
                ]
            )
        )

        ekscluster = template.add_resource(
            eks.Cluster(
                'EksCluster',
                Name=variables['EksClusterName'].ref,
                Version=variables['EksVersion'].ref,
                RoleArn=eksservicerole.get_att('Arn'),
                ResourcesVpcConfig=eks.ResourcesVpcConfig(
                    SecurityGroupIds=[ccpsecuritygroup.ref()],
                    SubnetIds=variables['EksSubnets'].ref
                )
            )
        )
        template.add_output(
            Output(
                "%sName" % ekscluster.title,
                Description='EKS Cluster Name',
                Export=Export(
                    Sub("${AWS::StackName}-%sName" % ekscluster.title)
                ),
                Value=ekscluster.ref()
            )
        )
        template.add_output(
            Output(
                "%sEndpoint" % ekscluster.title,
                Description='EKS Cluster Endpoint',
                Export=Export(
                    Sub("${AWS::StackName}-%sEndpoint" % ekscluster.title)
                ),
                Value=ekscluster.get_att('Endpoint')
            )
        )

        # Additional Outputs
        template.add_output(
            Output(
                'VpcId',
                Description='EKS Cluster VPC Id',
                Export=Export(
                    Sub('${AWS::StackName}-VpcId')
                ),
                Value=variables['VPC'].ref
            )
        )
        template.add_output(
            Output(
                'Subnets',
                Description='EKS Cluster Subnets',
                Export=Export(
                    Sub('${AWS::StackName}-Subnets')
                ),
                Value=Join(',', variables['EksSubnets'].ref)
            )
        )
Esempio n. 23
0
 def test_sub_without_vars(self):
     """Sub with no substitution map serializes to the bare-string form."""
     template_string = 'foo ${AWS::Region}'
     result = Sub(template_string).to_dict()
     self.assertEqual({'Fn::Sub': 'foo ${AWS::Region}'}, result)
Esempio n. 24
0
    def _add_alb(self, cd, service_name, config):
        """Create the ALB, target group and listener for one HTTP service.

        Internal services get an internal-scheme ALB in the private subnets;
        others a default (internet-facing) ALB in the public subnets.
        Returns ``(alb, lb, service_listener)`` where ``lb`` is the ECS
        service-side LoadBalancer definition that binds the container port
        to the target group.
        """
        # Service-specific security group layered on top of the shared one.
        sg_name = 'SG'+self.env+service_name
        svc_alb_sg = SecurityGroup(
            re.sub(r'\W+', '', sg_name),  # logical IDs must be alphanumeric
            GroupName=self.env+'-'+service_name,
            SecurityGroupIngress=self._generate_alb_security_group_ingress(
                config
            ),
            VpcId=Ref(self.vpc),
            GroupDescription=Sub(service_name+"-alb-sg")
        )
        self.template.add_resource(svc_alb_sg)
        # ALB names are capped at 32 characters by AWS.
        alb_name = service_name + pascalcase(self.env)
        if config['http_interface']['internal']:
            alb_subnets = [
                Ref(self.private_subnet1),
                Ref(self.private_subnet2)
            ]
            scheme = "internal"
            alb_name += 'Internal'
            alb_name = alb_name[:32]
            alb = ALBLoadBalancer(
                'ALB' + service_name,
                Subnets=alb_subnets,
                SecurityGroups=[
                    self.alb_security_group,
                    Ref(svc_alb_sg)
                ],
                Name=alb_name,
                Tags=[
                    {'Value': alb_name, 'Key': 'Name'}
                ],
                Scheme=scheme
            )
        else:
            alb_subnets = [
                Ref(self.public_subnet1),
                Ref(self.public_subnet2)
            ]
            alb_name = alb_name[:32]
            alb = ALBLoadBalancer(
                'ALB' + service_name,
                Subnets=alb_subnets,
                SecurityGroups=[
                    self.alb_security_group,
                    Ref(svc_alb_sg)
                ],
                Name=alb_name,
                Tags=[
                    {'Value': alb_name, 'Key': 'Name'}
                ]
            )

        self.template.add_resource(alb)

        # Target group: health-check path defaults to /elb-check unless the
        # service config overrides it.
        target_group_name = "TargetGroup" + service_name
        health_check_path = config['http_interface']['health_check_path'] if 'health_check_path' in config['http_interface'] else "/elb-check"
        if config['http_interface']['internal']:
            target_group_name = target_group_name + 'Internal'

        service_target_group = TargetGroup(
            target_group_name,
            HealthCheckPath=health_check_path,
            HealthyThresholdCount=2,
            HealthCheckIntervalSeconds=30,
            TargetGroupAttributes=[
                TargetGroupAttribute(
                    Key='deregistration_delay.timeout_seconds',
                    Value='30'
                )
            ],
            VpcId=Ref(self.vpc),
            Protocol="HTTP",
            Matcher=Matcher(HttpCode="200-399"),
            Port=int(config['http_interface']['container_port']),
            HealthCheckTimeoutSeconds=10,
            UnhealthyThresholdCount=3
        )
        self.template.add_resource(service_target_group)
        # Note: This is a ECS Loadbalancer definition. Not an ALB.
        # Defining this causes the target group to add a target to the correct
        # port in correct ECS cluster instance for the service container.
        lb = LoadBalancer(
            ContainerName=cd.Name,
            TargetGroupArn=Ref(service_target_group),
            ContainerPort=int(config['http_interface']['container_port'])
        )
        # Default listener action: forward all traffic to the target group.
        target_group_action = Action(
            TargetGroupArn=Ref(target_group_name),
            Type="forward"
        )
        service_listener = self._add_service_listener(
            service_name,
            target_group_action,
            alb,
            config['http_interface']['internal']
        )
        self._add_alb_alarms(service_name, alb)
        return alb, lb, service_listener
Esempio n. 25
0
    def create_resources(self):
        """Create the resources.

        Adds a Lambda execution role (limited to writing its own CloudWatch
        log group), the Lambda function itself, and exported outputs for the
        role and function.
        """
        template = self.template
        variables = self.get_variables()
        app_name = variables["AppName"].ref

        # Execution role: assumable by Lambda, allowed to create/write only
        # log groups under /aws/lambda/<app_name>-*.
        lambda_iam_role = template.add_resource(
            iam.Role(
                "LambdaRole",
                RoleName=Join("-", [app_name, "lambda-role"]),
                AssumeRolePolicyDocument=Policy(
                    Version="2012-10-17",
                    Statement=[
                        Statement(
                            Effect=Allow,
                            Action=[awacs.sts.AssumeRole],
                            Principal=Principal("Service",
                                                ["lambda.amazonaws.com"]),
                        )
                    ],
                ),
                Path="/service-role/",
                Policies=[
                    iam.Policy(
                        PolicyName=Join("-", [app_name, "lambda-policy"]),
                        PolicyDocument=Policy(
                            Version="2012-10-17",
                            Statement=[
                                Statement(
                                    Action=[
                                        awacs.logs.CreateLogGroup,
                                        awacs.logs.CreateLogStream,
                                        awacs.logs.PutLogEvents,
                                    ],
                                    Effect=Allow,
                                    Resource=[
                                        # arn:<partition>:logs:<region>:<acct>
                                        # :log-group:/aws/lambda/<app>-*
                                        Join(
                                            "",
                                            [
                                                "arn:",
                                                Partition,
                                                ":logs:",
                                                Region,
                                                ":",
                                                AccountId,
                                                ":log-group:/aws/lambda/",
                                                app_name,
                                                "-*",
                                            ],
                                        )
                                    ],
                                    Sid="WriteLogs",
                                )
                            ],
                        ),
                    )
                ],
            ))

        # The function itself; code and handler come from stack variables.
        lambda_function = template.add_resource(
            awslambda.Function(
                "LambdaFunction",
                Code=variables["Code"],
                Handler=variables["Entrypoint"].ref,
                Role=GetAtt(lambda_iam_role, "Arn"),
                Runtime="python3.6",
                Timeout=30,
                MemorySize=128,
                FunctionName=Join("-", [app_name, "integrationtest"]),
            ))

        # Exported outputs: role name, function ARN, and function name.
        template.add_output(
            Output(
                lambda_iam_role.title,
                Description="Lambda Role",
                Export=Export(
                    Sub("${AWS::StackName}-%s" % lambda_iam_role.title)),
                Value=Ref(lambda_iam_role),
            ))
        template.add_output(
            Output(
                lambda_function.title,
                Description="Lambda Function",
                Export=Export(
                    Sub("${AWS::StackName}-%s" % lambda_function.title)),
                Value=GetAtt(lambda_function, "Arn"),
            ))
        template.add_output(
            Output(
                lambda_function.title + "Name",
                Description="Lambda Function Name",
                Export=Export(
                    Sub("${AWS::StackName}-%sName" % lambda_function.title)),
                Value=Ref(lambda_function),
            ))
                Select(0, Split("-", Ref("AWS::StackName"))), "cluster-vpc-id"
            ])),
    ))

# HTTP listener on :3000 that forwards everything to the target group.
listener = elb.Listener(
    "Listener",
    Port="3000",
    Protocol="HTTP",
    LoadBalancerArn=Ref("LoadBalancer"),
    DefaultActions=[
        elb.Action(
            Type="forward",
            TargetGroupArn=Ref("TargetGroup"),
        )
    ],
)
t.add_resource(listener)

# Export the target group for sibling stacks and surface the service URL.
target_group_output = Output(
    "TargetGroup",
    Description="TargetGroup",
    Value=Ref("TargetGroup"),
    Export=Export(Sub("${AWS::StackName}-target-group")),
)
t.add_output(target_group_output)

url_value = Join("", ["http://", GetAtt("LoadBalancer", "DNSName"), ":3000"])
t.add_output(Output("URL", Description="Helloworld URL", Value=url_value))

# Emit the rendered template on stdout.
print(t.to_json())
Esempio n. 27
0
def main(**params) -> dict:
    """Build the CloudFormation template for a SOCA compute-node fleet.

    Returns a dict with ``success`` (bool) and ``output`` — the rendered
    YAML template on success, or an error description on failure.

    NOTE(review): every ``params`` key used below (InstanceType, BaseOS,
    ClusterId, JobId, SpotPrice, ...) is assumed to be pre-validated by
    the caller — confirm upstream validation.
    """
    try:
        # Metadata
        t = Template()
        t.set_version("2010-09-09")
        t.set_description("(SOCA) - Base template to deploy compute nodes.")
        allow_anonymous_data_collection = params["MetricCollectionAnonymous"]
        debug = False
        mip_usage = False  # flipped to True when a MixedInstancesPolicy is built below
        instances_list = params["InstanceType"].split("+")  # "+"-separated list of instance types
        asg_lt = asg_LaunchTemplate()
        ltd = LaunchTemplateData("NodeLaunchTemplateData")
        mip = MixedInstancesPolicy()
        stack_name = Ref("AWS::StackName")

        # Begin LaunchTemplateData
        # Node bootstrap script. It is rendered through Sub() further down,
        # so ${AWS::StackName} / ${AWS::Region} are CloudFormation
        # substitutions, while unbraced shell variables ($PATH, $PIP, ...)
        # pass through to the shell untouched.
        UserData = '''#!/bin/bash -xe
export PATH=$PATH:/usr/local/bin
if [[ "''' + params['BaseOS'] + '''" == "centos7" ]] || [[ "''' + params['BaseOS'] + '''" == "rhel7" ]];
    then
        EASY_INSTALL=$(which easy_install-2.7)
        $EASY_INSTALL pip
        PIP=$(which pip2.7)
        $PIP install awscli
        yum install -y nfs-utils # enforce install of nfs-utils
else
     # Upgrade awscli on ALI (do not use yum)
     EASY_INSTALL=$(which easy_install-2.7)
     $EASY_INSTALL pip
     PIP=$(which pip)
     $PIP install awscli --upgrade 
fi
if [[ "''' + params['BaseOS'] + '''" == "amazonlinux2" ]];
    then
        /usr/sbin/update-motd --disable
fi

GET_INSTANCE_TYPE=$(curl http://169.254.169.254/latest/meta-data/instance-type)
echo export "SOCA_CONFIGURATION="''' + str(params['ClusterId']) + '''"" >> /etc/environment
echo export "SOCA_BASE_OS="''' + str(params['BaseOS']) + '''"" >> /etc/environment
echo export "SOCA_JOB_QUEUE="''' + str(params['JobQueue']) + '''"" >> /etc/environment
echo export "SOCA_JOB_OWNER="''' + str(params['JobOwner']) + '''"" >> /etc/environment
echo export "SOCA_JOB_NAME="''' + str(params['JobName']) + '''"" >> /etc/environment
echo export "SOCA_JOB_PROJECT="''' + str(params['JobProject']) + '''"" >> /etc/environment
echo export "SOCA_VERSION="''' + str(params['Version']) + '''"" >> /etc/environment
echo export "SOCA_JOB_EFA="''' + str(params['Efa']).lower() + '''"" >> /etc/environment
echo export "SOCA_JOB_ID="''' + str(params['JobId']) + '''"" >> /etc/environment
echo export "SOCA_SCRATCH_SIZE=''' + str(params['ScratchSize']) + '''" >> /etc/environment
echo export "SOCA_INSTALL_BUCKET="''' + str(params['S3Bucket']) + '''"" >> /etc/environment
echo export "SOCA_INSTALL_BUCKET_FOLDER="''' + str(params['S3InstallFolder']) + '''"" >> /etc/environment
echo export "SOCA_FSX_LUSTRE_BUCKET="''' + str(params['FSxLustreConfiguration']['fsx_lustre']).lower() + '''"" >> /etc/environment
echo export "SOCA_FSX_LUSTRE_DNS="''' + str(params['FSxLustreConfiguration']['existing_fsx']).lower() + '''"" >> /etc/environment
echo export "SOCA_INSTANCE_TYPE=$GET_INSTANCE_TYPE" >> /etc/environment
echo export "SOCA_INSTANCE_HYPERTHREADING="''' + str(params['ThreadsPerCore']).lower() + '''"" >> /etc/environment
echo export "SOCA_HOST_SYSTEM_LOG="/apps/soca/''' + str(params['ClusterId']) + '''/cluster_node_bootstrap/logs/''' + str(params['JobId']) + '''/$(hostname -s)"" >> /etc/environment
echo export "AWS_STACK_ID=${AWS::StackName}" >> /etc/environment
echo export "AWS_DEFAULT_REGION=${AWS::Region}" >> /etc/environment


source /etc/environment
AWS=$(which aws)

# Give yum permission to the user on this specific machine
echo "''' + params['JobOwner'] + ''' ALL=(ALL) /bin/yum" >> /etc/sudoers

mkdir -p /apps
mkdir -p /data

# Mount EFS
echo "''' + params['EFSDataDns'] + ''':/ /data nfs4 nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2 0 0" >> /etc/fstab
echo "''' + params['EFSAppsDns'] + ''':/ /apps nfs4 nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2 0 0" >> /etc/fstab
mount -a 

# Configure NTP
yum remove -y ntp
yum install -y chrony
mv /etc/chrony.conf  /etc/chrony.conf.original
echo -e """
# use the local instance NTP service, if available
server 169.254.169.123 prefer iburst minpoll 4 maxpoll 4

# Use public servers from the pool.ntp.org project.
# Please consider joining the pool (http://www.pool.ntp.org/join.html).
# !!! [BEGIN] SOCA REQUIREMENT
# You will need to open UDP egress traffic on your security group if you want to enable public pool
#pool 2.amazon.pool.ntp.org iburst
# !!! [END] SOCA REQUIREMENT
# Record the rate at which the system clock gains/losses time.
driftfile /var/lib/chrony/drift

# Allow the system clock to be stepped in the first three updates
# if its offset is larger than 1 second.
makestep 1.0 3

# Specify file containing keys for NTP authentication.
keyfile /etc/chrony.keys

# Specify directory for log files.
logdir /var/log/chrony

# save data between restarts for fast re-load
dumponexit
dumpdir /var/run/chrony
""" > /etc/chrony.conf
systemctl enable chronyd

# Prepare  Log folder
mkdir -p $SOCA_HOST_SYSTEM_LOG
echo "@reboot /bin/bash /apps/soca/$SOCA_CONFIGURATION/cluster_node_bootstrap/ComputeNodePostReboot.sh >> $SOCA_HOST_SYSTEM_LOG/ComputeNodePostInstall.log 2>&1" | crontab -
$AWS s3 cp s3://$SOCA_INSTALL_BUCKET/$SOCA_INSTALL_BUCKET_FOLDER/scripts/config.cfg /root/
/bin/bash /apps/soca/$SOCA_CONFIGURATION/cluster_node_bootstrap/ComputeNode.sh ''' + params['SchedulerHostname'] + ''' >> $SOCA_HOST_SYSTEM_LOG/ComputeNode.sh.log 2>&1'''

        # EBS optimization and CpuOptions depend on the instance families requested.
        ltd.EbsOptimized = True
        for instance in instances_list:
            if "t2." in instance:
                ltd.EbsOptimized = False
            else:
                # metal + t2 does not support CpuOptions
                if "metal" not in instance:
                    ltd.CpuOptions = CpuOptions(
                        CoreCount=int(params["CoreCount"]),
                        # 1 thread/core when hyperthreading is disabled, else 2
                        # (see SOCA_INSTANCE_HYPERTHREADING export above)
                        ThreadsPerCore=1 if params["ThreadsPerCore"] is False else 2)

        ltd.IamInstanceProfile = IamInstanceProfile(Arn=params["ComputeNodeInstanceProfileArn"])
        ltd.KeyName = params["SSHKeyPair"]
        ltd.ImageId = params["ImageId"]
        # Pure-spot fleet (no SpotAllocationCount): put market options directly
        # on the launch template instead of using a MixedInstancesPolicy.
        if params["SpotPrice"] is not False and params["SpotAllocationCount"] is False:
            ltd.InstanceMarketOptions = InstanceMarketOptions(
                MarketType="spot",
                SpotOptions=SpotOptions(
                    MaxPrice=Ref("AWS::NoValue") if params["SpotPrice"] == "auto" else str(params["SpotPrice"])
                    # auto -> cap at OD price
                )
            )
        ltd.InstanceType = instances_list[0]
        ltd.NetworkInterfaces = [NetworkInterfaces(
            InterfaceType="efa" if params["Efa"] is not False else Ref("AWS::NoValue"),
            DeleteOnTermination=True,
            DeviceIndex=0,
            Groups=[params["SecurityGroupId"]]
        )]
        # Sub() resolves ${AWS::StackName}/${AWS::Region} inside the script.
        ltd.UserData = Base64(Sub(UserData))
        ltd.BlockDeviceMappings = [
            BlockDeviceMapping(
                DeviceName="/dev/xvda" if params["BaseOS"] == "amazonlinux2" else "/dev/sda1",
                Ebs=EBSBlockDevice(
                    VolumeSize=params["RootSize"],
                    VolumeType="gp2",
                    DeleteOnTermination="false" if params["KeepEbs"] is True else "true",
                    Encrypted=True))
        ]
        # Optional scratch volume; io1 only when provisioned IOPS were requested.
        if int(params["ScratchSize"]) > 0:
            ltd.BlockDeviceMappings.append(
                BlockDeviceMapping(
                    DeviceName="/dev/xvdbx",
                    Ebs=EBSBlockDevice(
                        VolumeSize=params["ScratchSize"],
                        VolumeType="io1" if int(params["VolumeTypeIops"]) > 0 else "gp2",
                        Iops=params["VolumeTypeIops"] if int(params["VolumeTypeIops"]) > 0 else Ref("AWS::NoValue"),
                        DeleteOnTermination="false" if params["KeepEbs"] is True else "true",
                        Encrypted=True))
            )
        # End LaunchTemplateData

        # Begin Launch Template Resource
        lt = LaunchTemplate("NodeLaunchTemplate")
        lt.LaunchTemplateName = params["ClusterId"] + "-" + str(params["JobId"])
        lt.LaunchTemplateData = ltd
        t.add_resource(lt)
        # End Launch Template Resource

        asg_lt.LaunchTemplateSpecification = LaunchTemplateSpecification(
            LaunchTemplateId=Ref(lt),
            Version=GetAtt(lt, "LatestVersionNumber")
        )

        # One override per requested instance type so the ASG can pick any of them.
        asg_lt.Overrides = []
        for instance in instances_list:
            asg_lt.Overrides.append(LaunchTemplateOverrides(
                InstanceType=instance))

        # Begin InstancesDistribution
        # Mixed on-demand/spot fleet: only when a spot price AND a spot
        # allocation count were given, and at least one on-demand node remains.
        if params["SpotPrice"] is not False and \
                params["SpotAllocationCount"] is not False and \
                (params["DesiredCapacity"] - params["SpotAllocationCount"]) > 0:
            mip_usage = True
            idistribution = InstancesDistribution()
            idistribution.OnDemandAllocationStrategy = "prioritized"  # only supported value
            idistribution.OnDemandBaseCapacity = params["DesiredCapacity"] - params["SpotAllocationCount"]
            idistribution.OnDemandPercentageAboveBaseCapacity = "0"  # force the other instances to be SPOT
            idistribution.SpotMaxPrice = Ref("AWS::NoValue") if params["SpotPrice"] == "auto" else str(
                params["SpotPrice"])
            idistribution.SpotAllocationStrategy = params['SpotAllocationStrategy']
            mip.InstancesDistribution = idistribution

        # End MixedPolicyInstance

        # Begin FSx for Lustre
        # Only create a new filesystem when FSx is requested and no existing
        # filesystem DNS was supplied.
        if params["FSxLustreConfiguration"]["fsx_lustre"] is not False:
            if params["FSxLustreConfiguration"]["existing_fsx"] is False:
                fsx_lustre = FileSystem("FSxForLustre")
                fsx_lustre.FileSystemType = "LUSTRE"
                fsx_lustre.StorageCapacity = params["FSxLustreConfiguration"]["capacity"]
                fsx_lustre.SecurityGroupIds = [params["SecurityGroupId"]]
                fsx_lustre.SubnetIds = params["SubnetId"]

                # Optional S3 backend: import/export paths default to the bucket
                # itself when no explicit import_path was provided.
                if params["FSxLustreConfiguration"]["s3_backend"] is not False:
                    fsx_lustre_configuration = LustreConfiguration()
                    fsx_lustre_configuration.ImportPath = params["FSxLustreConfiguration"]["import_path"] if params["FSxLustreConfiguration"]["import_path"] is not False else params["FSxLustreConfiguration"]["s3_backend"]
                    fsx_lustre_configuration.ExportPath = params["FSxLustreConfiguration"]["import_path"] if params["FSxLustreConfiguration"]["import_path"] is not False else params["FSxLustreConfiguration"]["s3_backend"] + "/" + params["ClusterId"] + "-fsxoutput/job-" +  params["JobId"] + "/"
                    fsx_lustre.LustreConfiguration = fsx_lustre_configuration

                fsx_lustre.Tags = base_Tags(
                    # False disable PropagateAtLaunch
                    Name=str(params["ClusterId"] + "-compute-job-" + params["JobId"]),
                    _soca_JobId=str(params["JobId"]),
                    _soca_JobName=str(params["JobName"]),
                    _soca_JobQueue=str(params["JobQueue"]),
                    _soca_StackId=stack_name,
                    _soca_JobOwner=str(params["JobOwner"]),
                    _soca_JobProject=str(params["JobProject"]),
                    _soca_KeepForever=str(params["KeepForever"]).lower(),
                    _soca_FSx="true",
                    _soca_ClusterId=str(params["ClusterId"]),
                )
                t.add_resource(fsx_lustre)
        # End FSx For Lustre

        # Begin AutoScalingGroup Resource
        asg = AutoScalingGroup("AutoScalingComputeGroup")
        asg.DependsOn = "NodeLaunchTemplate"
        # Mixed policy when spot distribution is in play or several instance
        # types were requested; otherwise a plain launch-template reference.
        if mip_usage is True or instances_list.__len__() > 1:
            mip.LaunchTemplate = asg_lt
            asg.MixedInstancesPolicy = mip

        else:
            asg.LaunchTemplate = LaunchTemplateSpecification(
                LaunchTemplateId=Ref(lt),
                Version=GetAtt(lt, "LatestVersionNumber"))

        # Fixed-size fleet: min == max == desired capacity.
        asg.MinSize = int(params["DesiredCapacity"])
        asg.MaxSize = int(params["DesiredCapacity"])
        asg.VPCZoneIdentifier = params["SubnetId"]

        if params["PlacementGroup"] is True:
            pg = PlacementGroup("ComputeNodePlacementGroup")
            pg.Strategy = "cluster"
            t.add_resource(pg)
            asg.PlacementGroup = Ref(pg)

        asg.Tags = Tags(
            Name=str(params["ClusterId"]) + "-compute-job-" + str(params["JobId"]),
            _soca_JobId=str(params["JobId"]),
            _soca_JobName=str(params["JobName"]),
            _soca_JobQueue=str(params["JobQueue"]),
            _soca_StackId=stack_name,
            _soca_JobOwner=str(params["JobOwner"]),
            _soca_JobProject=str(params["JobProject"]),
            _soca_KeepForever=str(params["KeepForever"]).lower(),
            _soca_ClusterId=str(params["ClusterId"]),
            _soca_NodeType="soca-compute-node")
        t.add_resource(asg)
        # End AutoScalingGroup Resource

        # Begin Custom Resource
        # Change Mapping to No if you want to disable this
        if allow_anonymous_data_collection is True:
            metrics = CustomResourceSendAnonymousMetrics("SendAnonymousData")
            metrics.ServiceToken = params["SolutionMetricLambda"]
            metrics.DesiredCapacity = str(params["DesiredCapacity"])
            metrics.InstanceType = str(params["InstanceType"])
            metrics.Efa = str(params["Efa"])
            metrics.ScratchSize = str(params["ScratchSize"])
            metrics.RootSize = str(params["RootSize"])
            metrics.SpotPrice = str(params["SpotPrice"])
            metrics.BaseOS = str(params["BaseOS"])
            metrics.StackUUID = str(params["StackUUID"])
            metrics.KeepForever = str(params["KeepForever"])
            metrics.FsxLustre = str(params["FSxLustreConfiguration"])
            t.add_resource(metrics)
            # End Custom Resource

        if debug is True:
            print(t.to_json())

        # Tags must use "soca:<Key>" syntax
        template_output = t.to_yaml().replace("_soca_", "soca:")
        return {'success': True,
                'output': template_output}

    except Exception as e:
        # Broad catch by design: this builder reports failures back to the
        # caller as a payload (with file/line info) instead of raising.
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        return {'success': False,
                'output': 'cloudformation_builder.py: ' + (
                            str(e) + ': error :' + str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))}
Esempio n. 28
0
    AllowedValues=MAPPINGS_PARAMS[2]
)
# Parameter selecting which runtime version the build project should use.
RUNTIME_VERSIONS = Parameter(
    "BuildRuntimeVersion",
    Type="String",
    AllowedValues=MAPPINGS_PARAMS[3],
)
for build_param in (RUNTIME_LANGUAGE, RUNTIME_VERSIONS):
    TEMPLATE.add_parameter(build_param)
TEMPLATE.add_mapping('Languages', MAPPINGS_PARAMS[1])

# Bucket holding the pipeline artifacts; the build role is scoped to it.
BUCKET = TEMPLATE.add_parameter(Parameter(
    'PipelineBucket',
    Type="String",
    AllowedPattern="[a-z0-9-]+",
))
ROLE = role_build(Sub(f'arn:aws:s3:::${{{BUCKET.title}}}/*'))
PROJECT = get_build_project(
    ROLE,
    RUNTIME_LANGUAGE.title,
    RUNTIME_VERSIONS.title,
    Tags={
        '10-technical:team': 'PlatformEngineering',
        '10-technical:runtime_language': Ref(RUNTIME_LANGUAGE),
        '10-technical:runtime_version': Ref(RUNTIME_VERSIONS),
    },
    SourceType='CODEPIPELINE',
)
for build_resource in (ROLE, PROJECT):
    TEMPLATE.add_resource(build_resource)
Esempio n. 29
0
function_code = cfnutil.load_python_lambda('lambdas/s3_extract.py')

# HACK: derive the function name from a digest of its own source so the
# bucket notification below can reference it by name without creating a
# circular dependency between the bucket and the lambda.
h = hashlib.md5(function_code)
lambda_function_name = 's3-extract-function-%s' % h.hexdigest()
src_bucket = t.add_resource(s3.Bucket(
    'SourceBucket',
    NotificationConfiguration= \
        If('BucketNotificationEnabled',
           s3.NotificationConfiguration(
               LambdaConfigurations=[
                   s3.LambdaConfigurations(
                       Function=Sub(
                           'arn:aws:lambda:${AWS::Region}:${AWS::AccountId}:function:%s' \
                           % lambda_function_name),
                       Event='s3:ObjectCreated:*',
                       Filter=s3.Filter(
                           S3Key=s3.S3Key(
                               Rules=[
                                   s3.Rules(Name='prefix', Value=''),
                                   s3.Rules(Name='suffix', Value='.zip')
                               ]
                           )
                       ),
                   ),
                   s3.LambdaConfigurations(
                       Function=Sub(
                           'arn:aws:lambda:${AWS::Region}:${AWS::AccountId}:function:%s' \
                           % lambda_function_name),
Esempio n. 30
0
def main():
    """Assemble the WorldCheck ECS demo stack and return it as JSON."""

    def assume_role_policy(service):
        # Trust policy allowing the given AWS service to assume a role.
        return {
            "Version": "2012-10-17",
            "Statement": [{
                "Effect": "Allow",
                "Principal": {
                    "Service": [service]
                },
                "Action": ["sts:AssumeRole"]
            }]
        }

    template = Template()

    template.add_resource(
        ecs.Cluster("ECSCluster", ClusterName="WorldCheckCluster"))

    # Role assumed by the task containers themselves.
    template.add_resource(
        iam.Role("ECSTaskRole",
                 AssumeRolePolicyDocument=assume_role_policy(
                     "ecs-tasks.amazonaws.com")))

    # Role used by the ECS scheduler to (de)register targets with the ELB.
    scheduler_policy = iam.Policy(
        PolicyDocument={
            "Version": "2012-10-17",
            "Statement": [{
                "Effect": "Allow",
                "Action": [
                    "ec2:Describe*",
                    "elasticloadbalancing:DeregisterInstancesFromLoadBalancer",
                    "elasticloadbalancing:DeregisterTargets",
                    "elasticloadbalancing:Describe*",
                    "elasticloadbalancing:RegisterInstancesWithLoadBalancer",
                    "elasticloadbalancing:RegisterTargets"
                ],
                "Resource": "*"
            }]
        },
        PolicyName="ecs-service")
    template.add_resource(
        iam.Role("ECSServiceSchedulerRole",
                 AssumeRolePolicyDocument=assume_role_policy(
                     "ecs.amazonaws.com"),
                 Policies=[scheduler_policy]))

    # Role for the EC2 container instances: agent registration, ECR pulls,
    # log shipping and CloudWatch metrics.
    instance_policy = iam.Policy(
        PolicyDocument={
            "Version": "2012-10-17",
            "Statement": [{
                "Effect": "Allow",
                "Action": [
                    "ecs:CreateCluster",
                    "ecs:DeregisterContainerInstance",
                    "ecs:DiscoverPollEndpoint", "ecs:Poll",
                    "ecs:RegisterContainerInstance",
                    "ecs:StartTelemetrySession",
                    "ecr:GetAuthorizationToken",
                    "ecr:BatchGetImage",
                    "ecr:GetDownloadUrlForLayer", "ecs:Submit*",
                    "logs:CreateLogStream", "logs:PutLogEvents",
                    "ec2:DescribeTags", "cloudwatch:PutMetricData"
                ],
                "Resource": "*"
            }]
        },
        PolicyName="ecs-service")
    template.add_resource(
        iam.Role("EC2InstanceRole",
                 AssumeRolePolicyDocument=assume_role_policy(
                     "ec2.amazonaws.com"),
                 Policies=[instance_policy]))
    template.add_resource(
        iam.InstanceProfile("EC2InstanceProfile",
                            Roles=[Ref("EC2InstanceRole")]))

    # Each user-data line goes through Sub() so the script may use
    # CloudFormation ${...} references.
    with open("user-data.sh", "r") as script:
        user_data_lines = script.readlines()

    template.add_resource(
        ec2.Instance(
            "EC2Instance",
            ImageId="ami-13f7226a",
            InstanceType="t2.micro",
            SecurityGroups=["default"],
            UserData=Base64(Join('', [Sub(line) for line in user_data_lines])),
            IamInstanceProfile=Ref("EC2InstanceProfile"),
        ))

    template.add_resource(
        ecs.TaskDefinition(
            "ECSTaskDefinition",
            TaskRoleArn=Ref("ECSTaskRole"),
            ContainerDefinitions=[
                ecs.ContainerDefinition(
                    Name="SimpleServer",
                    Memory="128",
                    Image="abbas123456/simple-server:latest",
                    PortMappings=[ecs.PortMapping(ContainerPort=8000)],
                )
            ]))

    # Load balancing: target group, ALB, and the port-80 listener in front.
    template.add_resource(
        elb.TargetGroup(
            "ECSTargetGroup",
            VpcId="vpc-925497f6",
            Port=8000,
            Protocol="HTTP",
        ))
    template.add_resource(
        elb.LoadBalancer(
            "LoadBalancer",
            Subnets=["subnet-a321c8fb", "subnet-68fa271e", "subnet-689d350c"],
            SecurityGroups=["sg-0202bd65"]))
    template.add_resource(
        elb.Listener(
            "LoadBalancerListener",
            DefaultActions=[
                elb.Action(Type="forward",
                           TargetGroupArn=Ref("ECSTargetGroup"))
            ],
            LoadBalancerArn=Ref("LoadBalancer"),
            Port=80,
            Protocol="HTTP",
        ))

    # The service must wait for the listener, otherwise registration fails.
    template.add_resource(
        ecs.Service("ECSService",
                    Cluster=Ref("ECSCluster"),
                    DesiredCount=1,
                    LoadBalancers=[
                        ecs.LoadBalancer(ContainerPort=8000,
                                         ContainerName="SimpleServer",
                                         TargetGroupArn=Ref("ECSTargetGroup"))
                    ],
                    Role=Ref("ECSServiceSchedulerRole"),
                    TaskDefinition=Ref("ECSTaskDefinition"),
                    DependsOn="LoadBalancerListener"))

    return template.to_json()
        AssumeRolePolicyDocument=Policy(Statement=[
            Statement(Effect=Allow,
                      Action=[AssumeRole],
                      Principal=Principal("Service", ["lambda.amazonaws.com"]))
        ])))

# Lambda that stores Galileo Babel notifications into the environment bucket.
galileo_babel_function = Function(
    "LambdaFunction",
    Code=Code(S3Bucket=Ref("LambdaBucket"), S3Key=Ref("S3Key")),
    Description="Function used to save galileo babel notifications in a bucket",
    Handler="galileo_babel_s3.lambda_handler",
    MemorySize=Ref("LambdaMemorySize"),
    # Test stacks get a fixed name; other environments are prefixed by stage.
    FunctionName=If(
        "IsTest",
        "testtest-editorial-search-galileo-babel",
        Sub("${LambdaEnv}-editorial-search-galileo-babel"),
    ),
    Environment=Environment(Variables={
        'GALILEO_BABEL_LAMBDA_ENV': Sub("${LambdaEnv}"),
        'BUCKET': Sub("${LambdaEnv}-editorial-search-galileo-babel"),
    }),
    Role=GetAtt("LambdaExecutionRole", "Arn"),
    Runtime="python3.6",
    Tags=Tags(
        BBCProject="editorial-platform",
        BBCComponent="editorial-search-galileo-babel",
        BBCEnvironment=Sub("${LambdaEnv}"),
    ),
    Timeout=Ref("LambdaTimeout"),
)
t.add_resource(galileo_babel_function)

t.add_resource(
    PolicyType(
        "FunctionPolicy",
Esempio n. 32
0
        "Environment": Environment(
            Variables={
                "HyP3StackName": Ref("AWS::StackName")
            }
        ),
        "Timeout": 60
    }
))

# JSON payload handed to the metric lambda on every scheduled invocation.
metric_input = Sub(
    '{"QueueUrl":"${QueueUrl}","AutoScalingGroupName":"${AGName}","MetricName":"${MetricName}"}',
    QueueUrl=Ref(start_events),
    AGName=Ref(processing_group),
    MetricName=custom_metric_name,
)
custom_metric_target = Target(
    "CustomMetricTarget",
    Arn=GetAtt(custom_metric, 'Arn'),
    Id="CustomMetricFunction1",
    Input=metric_input,
)

# Invoke the custom-metric lambda once per minute.
custom_metric_rule = t.add_resource(Rule(
    "CustomMetricSchedule",
    ScheduleExpression="rate(1 minute)",
    State="ENABLED",
    Targets=[custom_metric_target],
))

PermissionForEventsToInvokeLambda = t.add_resource(Permission(
    "EventScheduleCustomMetricPermissions",
    FunctionName=Ref(custom_metric),
    Action="lambda:InvokeFunction",
Esempio n. 33
0
        "ParamLaeArn",
        Type=constants.STRING,
        Description="ARN of the Lambda@Edge function",
    ))
template.set_parameter_label(param_laearn, "Lambda@Edge ARN")

# Stack-wide tag set applied to resources created by custom resources below.
cloudformation_tags = template.add_resource(
    custom_resources.cloudformation.Tags("CfnTags"))

# Don't simply import-output the Lambda@Edge ARN, but do it via a Parameter
# This allows us to migrate to a new L@E function gradually (otherwise, the output value would be locked and can't
# change)
lae_arn = template.add_resource(
    custom_resources.ssm.Parameter(
        "LaeArn",
        Name=Sub('/${AWS::StackName}/lae-arn'),
        Type="String",
        Value=Ref(param_laearn),
        Tags=GetAtt(cloudformation_tags, 'TagList'),
    ))
# Expose the SSM parameter name for consumers in other stacks.
template.add_output(
    Output(
        "LaeArnParameter",
        Description='SSM Parameter containing the Lambda@Edge ARN',
        Value=Ref(lae_arn),
        Export=Export(Join('-', [Ref(AWS_STACK_NAME), 'lae-arn'])),
    ))

template.add_output(
    Output(
        "DomainTable",