Example #1
    def __init__(self, scope: core.Construct, id_: str,
                 num_of_azs: int) -> None:
        super().__init__(scope, id_)

        self.audit_vpc = Vpc(
            self,
            id_,
            max_azs=num_of_azs,
            subnet_configuration=[
                # Currently IoT, AppConfig & Cloud Map are not accessible via VPC endpoints, so we use a NAT GW to access them
                SubnetConfiguration(name=PRIVATE_SUBNET_GROUP,
                                    subnet_type=SubnetType.PRIVATE,
                                    cidr_mask=24),
                SubnetConfiguration(name=PUBLIC_NAT_GWS_SUBNET_GROUP,
                                    subnet_type=SubnetType.PUBLIC,
                                    cidr_mask=24)
            ],
            gateway_endpoints={
                'S3':
                GatewayVpcEndpointOptions(
                    service=GatewayVpcEndpointAwsService.S3,
                    subnets=[
                        SubnetSelection(subnet_group_name=PRIVATE_SUBNET_GROUP)
                    ]),
                'DynamoDb':
                GatewayVpcEndpointOptions(
                    service=GatewayVpcEndpointAwsService.DYNAMODB,
                    subnets=[
                        SubnetSelection(subnet_group_name=PRIVATE_SUBNET_GROUP)
                    ]),
            },
            enable_dns_support=True,  # For the ElasticSearch Public Domain
            enable_dns_hostnames=True)

        self.audit_vpc.add_interface_endpoint(
            'SsmVpcEndpoint',
            service=InterfaceVpcEndpointAwsService.SSM,
            subnets=SubnetSelection(one_per_az=True))

        self.audit_vpc.add_interface_endpoint(
            'SqsVpcEndpoint',
            service=InterfaceVpcEndpointAwsService.SQS,
            subnets=SubnetSelection(one_per_az=True))
        self.audit_vpc.add_interface_endpoint(
            'Ec2VpcEndpoint',
            service=InterfaceVpcEndpointAwsService.EC2,
            subnets=SubnetSelection(one_per_az=True))

        self.audit_vpc.add_interface_endpoint(
            'LambdaVpcEndpoint',
            service=InterfaceVpcEndpointAwsService.LAMBDA_,
            subnets=SubnetSelection(one_per_az=True))

        self.lambdas_sg = SecurityGroup(self,
                                        id='LambdaSg',
                                        vpc=self.audit_vpc,
                                        security_group_name='Audit-Lambda')
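
A usage sketch, not part of the original snippet: a Lambda function placed in the private subnet group behind the interface endpoints, using the security group created above. `Function`, `Runtime` and `Code` are assumed to come from aws_cdk.aws_lambda.

        # Hypothetical continuation: run a Lambda inside the private subnets.
        audit_function = Function(
            self,
            'AuditFunction',
            runtime=Runtime.PYTHON_3_8,
            handler='index.handler',
            code=Code.from_asset('lambda'),
            vpc=self.audit_vpc,
            vpc_subnets=SubnetSelection(subnet_group_name=PRIVATE_SUBNET_GROUP),
            security_groups=[self.lambdas_sg])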
Example #2
    def __init__(self, scope: Construct, stack_id: str, **kwargs) -> None:
        """
        Initializes a new instance of NetworkTier
        """
        super().__init__(scope, stack_id, **kwargs)

        # We're creating a SubnetSelection with only the standard availability zones, to be used for
        # the NAT gateway and the VPC interface endpoints, because the local zones do not have
        # these available.
        standard_zone_subnets = SubnetSelection(
            availability_zones=config.availability_zones_standard,
            subnet_type=SubnetType.PUBLIC)

        # The VPC that all components of the render farm will be created in. We are using the
        # `availability_zones` property to override the availability zones that this VPC will use.
        self.vpc = Vpc(self,
                       'Vpc',
                       max_azs=len(self.availability_zones),
                       subnet_configuration=[
                           SubnetConfiguration(name='Public',
                                               subnet_type=SubnetType.PUBLIC,
                                               cidr_mask=28),
                           SubnetConfiguration(
                               name='Private',
                               subnet_type=SubnetType.PRIVATE_WITH_NAT,
                               cidr_mask=18)
                       ],
                       nat_gateway_subnets=standard_zone_subnets)

        # Add interface endpoints
        for idx, service_info in enumerate(_INTERFACE_ENDPOINT_SERVICES):
            service_name = service_info['name']
            service = service_info['service']
            self.vpc.add_interface_endpoint(service_name,
                                            service=service,
                                            subnets=standard_zone_subnets)

        # Add gateway endpoints
        for idx, service_info in enumerate(_GATEWAY_ENDPOINT_SERVICES):
            service_name = service_info['name']
            service = service_info['service']
            self.vpc.add_gateway_endpoint(service_name,
                                          service=service,
                                          subnets=[standard_zone_subnets])

        # Internal DNS zone for the VPC.
        self.dns_zone = PrivateHostedZone(self,
                                          'DnsZone',
                                          vpc=self.vpc,
                                          zone_name='deadline-test.internal')
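
The `_INTERFACE_ENDPOINT_SERVICES` and `_GATEWAY_ENDPOINT_SERVICES` lists are module-level values that are not part of this snippet; a plausible shape for them, with the concrete services picked here being assumptions:

# Hypothetical service lists; the enum classes are from aws_cdk.aws_ec2.
_INTERFACE_ENDPOINT_SERVICES = [
    {'name': 'ECR', 'service': InterfaceVpcEndpointAwsService.ECR},
    {'name': 'CloudWatchLogs', 'service': InterfaceVpcEndpointAwsService.CLOUDWATCH_LOGS},
    {'name': 'SecretsManager', 'service': InterfaceVpcEndpointAwsService.SECRETS_MANAGER},
]
_GATEWAY_ENDPOINT_SERVICES = [
    {'name': 'S3', 'service': GatewayVpcEndpointAwsService.S3},
    {'name': 'DynamoDB', 'service': GatewayVpcEndpointAwsService.DYNAMODB},
]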
Example #3
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # create VPC
        self.bifrost_vpc = Vpc(
            self,
            config.VPC,
            cidr='10.0.0.0/16',
            nat_gateways=0,
            subnet_configuration=[],
            enable_dns_support=True,
            enable_dns_hostnames=True,
        )

        self.internet_gateway = self.attach_internet_gateway()

        self.subnet_id_to_subnet_map = {}
        self.route_table_id_to_route_table_map = {}
        self.security_group_id_to_group_map = {}
        self.instance_id_to_instance_map = {}

        self.create_route_tables()
        self.create_security_groups()

        self.create_subnets()
        self.create_subnet_route_table_associations()

        self.create_routes()
        self.create_instances()
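
The helper methods called in this constructor are not included in the snippet; a minimal sketch of `attach_internet_gateway`, assuming it uses the L1 constructs `CfnInternetGateway` and `CfnVPCGatewayAttachment` from aws_cdk.aws_ec2:

    def attach_internet_gateway(self) -> CfnInternetGateway:
        # Sketch only: create an internet gateway and attach it to the VPC above.
        internet_gateway = CfnInternetGateway(self, 'InternetGateway')
        CfnVPCGatewayAttachment(self,
                                'InternetGatewayAttachment',
                                vpc_id=self.bifrost_vpc.vpc_id,
                                internet_gateway_id=internet_gateway.ref)
        return internet_gateway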
Example #4
    def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Subnet configurations for a public and private tier
        subnet1 = SubnetConfiguration(
                name="Public",
                subnet_type=SubnetType.PUBLIC,
                cidr_mask=24)
        subnet2 = SubnetConfiguration(
                name="Private",
                subnet_type=SubnetType.PRIVATE,
                cidr_mask=24)

        vpc = Vpc(self,
                  "TheVPC",
                  cidr="10.0.0.0/16",
                  enable_dns_hostnames=True,
                  enable_dns_support=True,
                  max_azs=2,
                  nat_gateway_provider=NatProvider.gateway(),
                  nat_gateways=1,
                  subnet_configuration=[subnet1, subnet2]
                  )

        # This will export the VPC's ID in CloudFormation under the key
        # 'vpcid'
        core.CfnOutput(self, "vpcid", value=vpc.vpc_id)

        # Prepares output attributes to be passed into other stacks
        # In this case, it is our VPC and subnets.
        self.output_props = props.copy()
        self.output_props['vpc'] = vpc
        self.output_props['subnets'] = vpc.public_subnets
Example #5
    def __init__(self, scope: cdk.Stack, construct_id: str, vpc_cidr: str,
                 jump_host: str, mgmt_ports: list, subnet_len: int,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # args:
        # - vpc_cidr (str): The CIDR range for the VPC.
        # - jump_host (str): An optional IP address for the jump host. If this
        #                    is not specified, the Security Group will not be
        #                    created.
        # - mgmt_ports (list): A list of TCP ports which the jump host is
        #                      allowed to connect to.
        # - subnet_len (int): The prefix length for subnet CIDR addresses.

        # Create the VPC resource. The VPC does not have an internet gateway,
        # or NAT gateway. Subnets are created in 2 zones.
        subnets = [
            SubnetConfiguration(name="MyVPC-Private",
                                subnet_type=SubnetType.ISOLATED,
                                cidr_mask=subnet_len)
        ]
        self._vpc = Vpc(self,
                        "MyVPC",
                        cidr=vpc_cidr,
                        max_azs=2,
                        nat_gateways=None,
                        subnet_configuration=subnets)

        # Security Group only created if the jump host parameter was
        # specified.
        if jump_host is not None and len(jump_host) > 0:
            self.create_sg(jump_host, mgmt_ports)
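
`create_sg` is defined elsewhere in this stack; a hedged sketch of what it plausibly does, opening the listed management ports to the jump host only (SecurityGroup, Peer and Port are from aws_cdk.aws_ec2):

    def create_sg(self, jump_host: str, mgmt_ports: list) -> None:
        # Sketch only: allow the jump host to reach the listed TCP ports.
        sg = SecurityGroup(self,
                           "JumpHostSG",
                           vpc=self._vpc,
                           allow_all_outbound=False,
                           description="Management traffic from the jump host")
        for port in mgmt_ports:
            sg.add_ingress_rule(peer=Peer.ipv4(f"{jump_host}/32"),
                                connection=Port.tcp(port))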
Example #6
 def __init__(self, scope: Construct, construct_id: str,
              env: Environment) -> None:
     super().__init__(scope, construct_id, env=env)
     smol_table = SmolTable(self, "SmolTable", table_name=TABLE_NAME)
     smol_vpc = Vpc.from_lookup(self, "CoreVPC", vpc_name=VPC_NAME)
     smol_subnets = SubnetSelection(
         one_per_az=True,
         subnet_type=SubnetType.PRIVATE,
     )
     smol_lambda = Function(
         self,
         "SmolAPI",
         code=Code.from_asset_image(directory=abspath("./")),
         environment={
             "CAPTCHA_KEY": environ["CAPTCHA_KEY"],
             "SAFE_BROWSING_KEY": environ["SAFE_BROWSING_KEY"],
         },
         function_name=FUNCTION_NAME,
         handler=Handler.FROM_IMAGE,
         log_retention=RetentionDays.ONE_WEEK,
         memory_size=MEMORY_ALLOCATION,
         reserved_concurrent_executions=RESERVED_CONCURRENCY,
         runtime=Runtime.FROM_IMAGE,
         timeout=Duration.seconds(TIMEOUT_SEC),
         tracing=Tracing.ACTIVE,
         vpc=smol_vpc,
         vpc_subnets=smol_subnets,
     )
     smol_table.table.grant(smol_lambda, "dynamodb:DescribeTable")
     smol_table.table.grant(smol_lambda, "dynamodb:GetItem")
     smol_table.table.grant(smol_lambda, "dynamodb:PutItem")
     SmolTarget(self, "SmolTarget", smol_lambda, API_HOST)
Example #7
    def __init__(self, scope: cdk.Stack, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Create the VPC resource.
        self._vpc = Vpc(self, "MyVPC", cidr="10.10.0.0/16")
        # Create a Security Group within the VPC that is used to allow
        # management traffic from designated jump hosts.
        self._sg = SecurityGroup(
            self,
            "MySG",
            vpc=self._vpc,
            allow_all_outbound=False,
            description="Management traffic from jump boxes",
            security_group_name="jumpbox-mgmt-traffic")

        # Add ingress rules to the Security Group for the jump host
        # 10.255.0.10 to TCP/22 and TCP/3389.
        self._sg.add_ingress_rule(peer=Peer.ipv4("10.255.0.10/32"),
                                  connection=Port(
                                      protocol=Protocol.TCP,
                                      string_representation="host1",
                                      from_port=22,
                                      to_port=22))
        self._sg.add_ingress_rule(peer=Peer.ipv4("10.255.0.10/32"),
                                  connection=Port(
                                      protocol=Protocol.TCP,
                                      string_representation="host1",
                                      from_port=3389,
                                      to_port=3389))
Example #8
    def __init__(self, scope: Construct, id: str, envs: EnvSettings):
        super().__init__(scope, id)

        self.ecr = BaseECR(self, "ECR", envs)

        self.key = BaseKMS(self, "KMS", envs)

        self.vpc = Vpc(self, "Vpc", nat_gateways=1)

        self.cluster = Cluster(self,
                               "WorkersCluster",
                               cluster_name="schema-ecs-cluster",
                               vpc=self.vpc)

        self.db = DatabaseInstance(
            self,
            "DataBase",
            database_name=envs.data_base_name,
            engine=DatabaseInstanceEngine.POSTGRES,
            storage_encrypted=True,
            allocated_storage=50,
            instance_type=InstanceType.of(InstanceClass.BURSTABLE2,
                                          InstanceSize.SMALL),
            vpc=self.vpc,
        )

        if self.db.secret:
            CfnOutput(
                self,
                id="DbSecretOutput",
                export_name=self.get_database_secret_arn_output_export_name(
                    envs),
                value=self.db.secret.secret_arn,
            )
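
`get_database_secret_arn_output_export_name` is not shown in this snippet; a minimal sketch, assuming `EnvSettings` exposes a project name that is used to build a stable export name:

    def get_database_secret_arn_output_export_name(self, envs: EnvSettings) -> str:
        # Hypothetical helper; the attribute name `project_name` is an assumption.
        return f"{envs.project_name}-db-secret-arn"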
Example #9
    def __init__(self, scope: core.Construct, id: str, config: Dict,
                 vpc: ec2.Vpc, es_sg: ec2.SecurityGroup) -> None:
        super().__init__(scope, id)

        es_config = config['data']['elasticsearch']

        # Build ES domain construct parameter
        capacity_config = es.CapacityConfig(
            master_node_instance_type=es_config['capacity']['masterNodes']
            ['instanceType'],
            master_nodes=es_config['capacity']['masterNodes']['count'],
            data_node_instance_type=es_config['capacity']['dataNodes']
            ['instanceType'],
            data_nodes=es_config['capacity']['dataNodes']['count'],
        )

        vpc_options = es.VpcOptions(
            security_groups=[es_sg],
            subnets=vpc.select_subnets(
                subnet_group_name=es_config['subnetGroupName']).subnets,
        )

        ebs_options = es.EbsOptions(volume_size=es_config['ebs']['volumeSize'])

        zone_awareness = es.ZoneAwarenessConfig(
            availability_zone_count=es_config['zoneAwareness']['count'],
            enabled=es_config['zoneAwareness']['enabled'],
        )

        logging_options = es.LoggingOptions(
            app_log_enabled=es_config['logging']['appLogEnabled'],
            audit_log_enabled=es_config['logging']['auditLogEnabled'],
            slow_index_log_enabled=es_config['logging']['slowIndexLogEnabled'],
            slow_search_log_enabled=es_config['logging']['slowSearchLogEnabled'])

        access_policy = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            principals=[iam.AnyPrincipal()],
            actions=['es:*'],
            resources=[
                "arn:aws:es:" + config['awsRegion'] + ":" +
                config['awsAccount'] + ":domain/" + es_config['domainName'] +
                "/*"
            ])

        # Create ES domain
        es.Domain(
            self,
            'Domain',
            domain_name=es_config['domainName'],
            version=es.ElasticsearchVersion.of(es_config['version']),
            capacity=capacity_config,
            ebs=ebs_options,
            zone_awareness=zone_awareness,
            vpc_options=vpc_options,
            logging=logging_options,
            access_policies=[access_policy],
        )
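
The construct reads its settings from a nested `config` dictionary; a sketch of the shape it expects, with purely illustrative values:

config = {
    'awsRegion': 'eu-west-1',
    'awsAccount': '111111111111',
    'data': {
        'elasticsearch': {
            'domainName': 'audit-domain',
            'version': '7.9',
            'subnetGroupName': 'Private',
            'capacity': {
                'masterNodes': {'instanceType': 'r5.large.elasticsearch', 'count': 3},
                'dataNodes': {'instanceType': 'r5.large.elasticsearch', 'count': 2},
            },
            'ebs': {'volumeSize': 100},
            'zoneAwareness': {'enabled': True, 'count': 2},
            'logging': {
                'appLogEnabled': True,
                'auditLogEnabled': True,
                'slowIndexLogEnabled': True,
                'slowSearchLogEnabled': True,
            },
        },
    },
}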
Example #10
 def __init__(self, scope: cdk.Construct, id: str, cidr=None, **kwargs):
     super().__init__(scope, id)
     self.cidr = cidr or "10.0.0.0/16"
     self.vpc = Vpc(
         self,
         f"{id}Vpc",
         cidr=self.cidr,
         **kwargs,
     )
Example #11
    def execute(scope: Construct, subnet_configurations: list) -> Vpc:
        vpc = Vpc(
            scope=scope,
            id="vpc",
            cidr="172.16.0.0/16",
            subnet_configuration=subnet_configurations
        )

        return vpc
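
A usage sketch for this factory method; the enclosing class name `VpcFactory` and the `stack` scope are hypothetical:

vpc = VpcFactory.execute(
    scope=stack,
    subnet_configurations=[
        SubnetConfiguration(name="Public",
                            subnet_type=SubnetType.PUBLIC,
                            cidr_mask=24),
        SubnetConfiguration(name="Private",
                            subnet_type=SubnetType.PRIVATE,
                            cidr_mask=24),
    ])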
Example #12
    def __init__(self, scope: Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # implement the glue data catalog databases used in the data lake
        catalog = DataLakeCatalog(self, 'DataLakeCatalog')
        self.__raw_glue_db = catalog.raw_database
        self.__clean_glue_db = catalog.clean_database
        self.__curated_glue_db = catalog.transform_database
        self.__audit_glue_db = Database(self, 'AuditGlueDB', database_name='ara_audit_data_' + self.account)


        # implement the S3 buckets for the data lake
        storage = DataLakeStorage(self, 'DataLakeStorage')
        self.__logs_s3_bucket = AutoEmptyBucket(
            self, 'Logs',
            bucket_name='ara-logs-' + self.account,
            uuid=AutoEmptyConfig.FOUNDATIONS_UUID
        ).bucket

        self.__raw_s3_bucket = storage.raw_bucket
        self.__clean_s3_bucket = storage.clean_bucket
        self.__curated_s3_bucket = storage.transform_bucket

        AuditTrailGlue(self, 'GlueAudit',
            log_bucket=self.__logs_s3_bucket,
            audit_bucket=self.__curated_s3_bucket,
            audit_db=self.__audit_glue_db,
            audit_table=self.__curated_s3_bucket.bucket_name
        )

        # the vpc used for the overall data lake (same vpc, different subnet for modules)
        self.__vpc = Vpc(self, 'Vpc')
        self.__public_subnets = self.__vpc.select_subnets(subnet_type=SubnetType.PUBLIC)
        self.__private_subnets = self.__vpc.select_subnets(subnet_type=SubnetType.PRIVATE)
        self.__vpc.add_gateway_endpoint("S3GatewayEndpoint",
                                        service=GatewayVpcEndpointAwsService.S3,
                                        subnets=[SubnetSelection(subnet_type=SubnetType.PUBLIC),
                                                 SubnetSelection(subnet_type=SubnetType.PRIVATE)])

        # IAM groups
        self.__admin_group = Group(self, 'GroupAdmins', group_name='ara-admins')
        self.__analysts_group = Group(self, 'GroupAnalysts', group_name='ara-analysts')
        self.__developers_group = Group(self, 'GroupDevelopers', group_name='ara-developers')
Example #13
    def select_vpc(self, scope: BaseApp) -> Vpc:
        vpc_filters = scope.environment_config.get("vpcSelectionFilter", {})

        return Vpc.from_lookup(
            self,
            scope.prefixed_str("vpc"),
            vpc_id=vpc_filters.get("vpcId"),
            vpc_name=vpc_filters.get("vpcName"),
            is_default=vpc_filters.get("isDefault"),
            tags=vpc_filters.get("tags"),
        )
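
The lookup is driven by a `vpcSelectionFilter` block in the environment configuration; a hedged sketch of the keys it reads (values are placeholders):

environment_config = {
    "vpcSelectionFilter": {
        "vpcId": "vpc-0123456789abcdef0",  # or omit and filter by name, default flag or tags
        "vpcName": None,
        "isDefault": None,
        "tags": None,
    }
}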
Example #14
    def __init__(self, scope: cdk.Construct, construct_id: str, config,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        self.vpc = Vpc(self,
                       "ec2-vpc-altimeter",
                       max_azs=1,
                       subnet_configuration=[
                           SubnetConfiguration(name="Public",
                                               subnet_type=SubnetType.PUBLIC),
                           SubnetConfiguration(name="Private",
                                               subnet_type=SubnetType.PRIVATE)
                       ])
        cdk.Tags.of(self.vpc).add("Name", "vpc-audit--altimeter")
Example #15
 def get_vpc(self):
     vpc = Vpc(self._stack,
               self._name + 'Vpc',
               cidr='10.0.0.0/16',
               nat_gateways=0,
               max_azs=MAX_AVAILABILITY_ZONES)
     self._export('VpcId', vpc.vpc_id)
     self._export('VpcCidrBlock', vpc.vpc_cidr_block)
     for i in range(MAX_AVAILABILITY_ZONES):
         self._export(f'AvailabilityZone{i}', vpc.availability_zones[i])
         self._export(f'PublicSubnetId{i}', vpc.public_subnets[i].subnet_id)
         self._export(f'IsolatedSubnet{i}',
                      vpc.isolated_subnets[i].subnet_id)
     self._tag_it(vpc)
     return vpc
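
The `_export` and `_tag_it` helpers are not part of this snippet; a minimal sketch of `_export`, assuming it wraps `CfnOutput` and that `self._name` matches the stack name used by the `Fn.import_value` calls in Example #18:

    def _export(self, name: str, value: str) -> None:
        # Hypothetical helper: emit a named CloudFormation export.
        CfnOutput(self._stack,
                  self._name + name,
                  value=value,
                  export_name=self._name + name)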
Example #16
    def __init__(self, scope: Construct, stack_id: str, *, props: BaseFarmStackProps, **kwargs):
        """
        Initialize a new instance of BaseFarmStack
        """
        super().__init__(scope, stack_id, **kwargs)

        # The VPC that all components of the render farm will be created in.
        self.vpc = Vpc(
            self,
            'Vpc',
            max_azs=2,
        )

        version = VersionQuery(
            self,
            'Version',
            version=props.deadline_version,
        )

        images = ThinkboxDockerImages(
            self,
            'Images',
            version=version,
            user_aws_thinkbox_eula_acceptance=props.accept_aws_thinkbox_eula,
        )

        repository = Repository(
            self,
            'Repository',
            removal_policy=RepositoryRemovalPolicies(
                database=RemovalPolicy.DESTROY,
                filesystem=RemovalPolicy.DESTROY,
            ),
            vpc=self.vpc,
            version=version,
        )

        self.render_queue = RenderQueue(
            self,
            'RenderQueue',
            vpc=self.vpc,
            version=version,
            images=images,
            repository=repository,
            deletion_protection=False,
        )
Example #17
    def __init__(self, scope: cdk.Stack, construct_id: str, vpc_cidr: str,
                 jump_host: str, mgmt_ports: list, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # args:
        # - vpc_cidr (str): The CIDR range for the VPC.
        # - jump_host (str): An optional IP address for the jump host. If this
        #                    is not specified, the Security Group will not be
        #                    created.
        # - mgmt_ports (list): A list of TCP ports which the jump host is
        #                      allowed to connect to.

        # Create the VPC resource with the given CIDR range.
        self._vpc = Vpc(self, "MyVPC", cidr=vpc_cidr)

        # Security Group only created if the jump host parameter was
        # specified.
        if jump_host is not None and len(jump_host) > 0:
            self.create_sg(jump_host, mgmt_ports)
Example #18
def get_vpc(scope: Construct) -> Vpc:
    config = get_cluster_config()
    stack_name = config.stack_name
    return Vpc.from_vpc_attributes(
        scope,
        'vpc',
        vpc_id=Fn.import_value(stack_name + 'VpcId'),
        vpc_cidr_block=Fn.import_value(stack_name + 'VpcCidrBlock'),
        availability_zones=[
            Fn.import_value(stack_name + 'AvailabilityZone0'),
            Fn.import_value(stack_name + 'AvailabilityZone1'),
        ],
        public_subnet_ids=[
            Fn.import_value(stack_name + 'PublicSubnetId0'),
            Fn.import_value(stack_name + 'PublicSubnetId1'),
        ],
        isolated_subnet_ids=[
            Fn.import_value(stack_name + 'IsolatedSubnet0'),
            Fn.import_value(stack_name + 'IsolatedSubnet1'),
        ],
    )
Example #19
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        custom_vpc = Vpc(
            self,
            "Vpc",
            max_azs=3,
            cidr="10.0.0.0/16",
            subnet_configuration=[
                SubnetConfiguration(cidr_mask=24,
                                    name="ingress",
                                    subnet_type=SubnetType.PUBLIC),
                SubnetConfiguration(cidr_mask=24,
                                    name="app",
                                    subnet_type=SubnetType.ISOLATED),
            ])

        core.Tag.add(custom_vpc, "Author", "Tinette")

        core.CfnOutput(self,
                       "Vpc",
                       value=custom_vpc.vpc_id,
                       export_name="custom_vpc_id")
Example #20
    def create_vpc(self, scope: BaseApp, eks_enabled: bool = True) -> Vpc:
        vpc = Vpc(
            self,
            scope.prefixed_str(
                scope.environment_config.get('vpc', {}).get('name')),
            cidr=scope.environment_config.get('vpc', {}).get('cidr'),
            max_azs=scope.environment_config.get('vpc', {}).get('maxAZs'),
            enable_dns_hostnames=True,
            enable_dns_support=True,
            subnet_configuration=self._get_subnet_configuration(scope),
        )

        if eks_enabled:
            for subnet in vpc.public_subnets:
                Tag.add(subnet, "kubernetes.io/role/elb", "1")
                Tag.add(
                    subnet,
                    f"kubernetes.io/cluster/{scope.prefixed_str(scope.environment_config.get('eks', {}).get('clusterName'))}",
                    "shared")
            for subnet in vpc.private_subnets:
                Tag.add(subnet, "kubernetes.io/role/internal-elb", "1")
                Tag.add(
                    subnet,
                    f"kubernetes.io/cluster/{scope.prefixed_str(scope.environment_config.get('eks', {}).get('clusterName'))}",
                    "shared")

        if scope.environment_config.get('vpc', {}).get('bastionHost',
                                                       {}).get('enabled'):
            BastionHostLinux(self,
                             scope.prefixed_str('BastionHost'),
                             vpc=vpc,
                             instance_type=InstanceType(
                                 scope.environment_config.get('vpc', {}).get(
                                     'bastionHost', {}).get('instanceType')),
                             instance_name=scope.prefixed_str('BastionHost'))
        return vpc
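
`_get_subnet_configuration` is defined elsewhere; a hedged sketch that builds the subnet list from the same environment config (the `subnets` key and its fields are assumptions):

    def _get_subnet_configuration(self, scope: BaseApp) -> list:
        subnet_configs = []
        for cfg in scope.environment_config.get('vpc', {}).get('subnets', []):
            subnet_configs.append(
                SubnetConfiguration(name=cfg['name'],
                                    subnet_type=SubnetType[cfg['type']],
                                    cidr_mask=cfg['cidrMask']))
        return subnet_configs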
Example #21
    def __init__(self, scope: Construct, stack_id: str, *, props: SEPStackProps, **kwargs):
        """
        Initialize a new instance of SEPStack
        :param scope: The scope of this construct.
        :param stack_id: The ID of this construct.
        :param props: The properties for this construct.
        :param kwargs: Any kwargs that need to be passed on to the parent class.
        """
        super().__init__(scope, stack_id, **kwargs)

        # The VPC that all components of the render farm will be created in.
        vpc = Vpc(
            self,
            'Vpc',
            max_azs=2,
        )

        recipes = ThinkboxDockerRecipes(
            self,
            'Image',
            stage=Stage.from_directory(props.docker_recipes_stage_path),
        )

        repository = Repository(
            self,
            'Repository',
            vpc=vpc,
            version=recipes.version,
            repository_installation_timeout=Duration.minutes(20),
            # TODO - Evaluate deletion protection for your own needs. These properties are set to RemovalPolicy.DESTROY
            # to cleanly remove everything when this stack is destroyed. If you would like to ensure
            # that these resources are not accidentally deleted, you should set these properties to RemovalPolicy.RETAIN
            # or just remove the removal_policy parameter.
            removal_policy=RepositoryRemovalPolicies(
                database=RemovalPolicy.DESTROY,
                filesystem=RemovalPolicy.DESTROY,
            ),
        )

        host = 'renderqueue'
        zone_name = 'deadline-test.internal'

        # Internal DNS zone for the VPC.
        dns_zone = PrivateHostedZone(
            self,
            'DnsZone',
            vpc=vpc,
            zone_name=zone_name,
        )

        ca_cert = X509CertificatePem(
            self,
            'RootCA',
            subject=DistinguishedName(
                cn='SampleRootCA',
            ),
        )

        server_cert = X509CertificatePem(
            self,
            'RQCert',
            subject=DistinguishedName(
                cn=f'{host}.{dns_zone.zone_name}',
                o='RFDK-Sample',
                ou='RenderQueueExternal',
            ),
            signing_certificate=ca_cert,
        )

        render_queue = RenderQueue(
            self,
            'RenderQueue',
            vpc=vpc,
            version=recipes.version,
            images=recipes.render_queue_images,
            repository=repository,
            # TODO - Evaluate deletion protection for your own needs. This is set to false to
            # cleanly remove everything when this stack is destroyed. If you would like to ensure
            # that this resource is not accidentally deleted, you should set this to true.
            deletion_protection=False,
            hostname=RenderQueueHostNameProps(
                hostname=host,
                zone=dns_zone,
            ),
            traffic_encryption=RenderQueueTrafficEncryptionProps(
                external_tls=RenderQueueExternalTLSProps(
                    rfdk_certificate=server_cert,
                ),
                internal_protocol=ApplicationProtocol.HTTPS,
            ),
        )

        if props.create_resource_tracker_role:
            # Creates the Resource Tracker Access role. This role is required to exist in your account so the resource tracker will work properly
            Role(
                self,
                'ResourceTrackerRole',
                assumed_by=ServicePrincipal('lambda.amazonaws.com'),
                managed_policies= [ManagedPolicy.from_aws_managed_policy_name('AWSThinkboxDeadlineResourceTrackerAccessPolicy')],
                role_name= 'DeadlineResourceTrackerAccessRole',
            )

        fleet = SpotEventPluginFleet(
            self,
            'SpotEventPluginFleet',
            vpc=vpc,
            render_queue=render_queue,
            deadline_groups=['group_name'],
            instance_types=[InstanceType.of(InstanceClass.BURSTABLE3, InstanceSize.LARGE)],
            worker_machine_image=props.worker_machine_image,
            max_capacity=1,
        )

        # Optional: Add additional tags to both spot fleet request and spot instances.
        Tags.of(fleet).add('name', 'SEPtest')

        ConfigureSpotEventPlugin(
            self,
            'ConfigureSpotEventPlugin',
            vpc=vpc,
            render_queue=render_queue,
            spot_fleets=[fleet],
            configuration=SpotEventPluginSettings(
                enable_resource_tracker=True,
            ),
        )
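
`SEPStackProps` is declared outside this snippet; a minimal sketch of the fields the stack reads, with types inferred from the usage above (`dataclass` is from dataclasses, `IMachineImage` from aws_cdk.aws_ec2):

@dataclass
class SEPStackProps:
    """Hypothetical property bag for SEPStack."""
    docker_recipes_stage_path: str
    worker_machine_image: IMachineImage
    create_resource_tracker_role: bool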
Example #22
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Create two VPCs - one to host our private website, the other to act as a client
        website_vpc = Vpc(
            self,
            "WEBSITEVPC",
            cidr="10.0.0.0/16",
        )
        client_vpc = Vpc(
            self,
            "ClientVPC",
            cidr="10.1.0.0/16",
        )

        # Create a bastion host in the client VPC which will act as our client workstation
        bastion = BastionHostLinux(
            self,
            "WEBClient",
            vpc=client_vpc,
            instance_name='my-bastion',
            instance_type=InstanceType('t3.micro'),
            machine_image=AmazonLinuxImage(),
            subnet_selection=SubnetSelection(subnet_type=SubnetType.PRIVATE),
            security_group=SecurityGroup(
                scope=self,
                id='bastion-sg',
                security_group_name='bastion-sg',
                description='Security group for the bastion, no inbound ports open '
                            'because we access the bastion via AWS SSM',
                vpc=client_vpc,
                allow_all_outbound=True))

        # Set up a VPC peering connection between the client and website VPCs, and adjust
        # the routing tables to allow connections back and forth
        VpcPeeringHelper(self, 'Peering', website_vpc, client_vpc)

        # Create VPC endpoints for API gateway
        vpc_endpoint = InterfaceVpcEndpoint(
            self,
            'APIGWVpcEndpoint',
            vpc=website_vpc,
            service=InterfaceVpcEndpointAwsService.APIGATEWAY,
            private_dns_enabled=True,
        )
        vpc_endpoint.connections.allow_from(bastion, Port.tcp(443))
        endpoint_id = vpc_endpoint.vpc_endpoint_id

        api_policy = iam.PolicyDocument(statements=[
            iam.PolicyStatement(principals=[iam.AnyPrincipal()],
                                actions=['execute-api:Invoke'],
                                resources=['execute-api:/*'],
                                effect=iam.Effect.DENY,
                                conditions={
                                    "StringNotEquals": {
                                        "aws:SourceVpce": endpoint_id
                                    }
                                }),
            iam.PolicyStatement(principals=[iam.AnyPrincipal()],
                                actions=['execute-api:Invoke'],
                                resources=['execute-api:/*'],
                                effect=iam.Effect.ALLOW)
        ])

        # Create an s3 bucket to hold the content
        content_bucket = s3.Bucket(self,
                                   "ContentBucket",
                                   removal_policy=core.RemovalPolicy.DESTROY)

        # Upload our static content to the bucket
        s3dep.BucketDeployment(self,
                               "DeployWithInvalidation",
                               sources=[s3dep.Source.asset('website')],
                               destination_bucket=content_bucket)

        # Create a private API GW in the API VPC
        api = apigw.RestApi(self,
                            'PrivateS3Api',
                            endpoint_configuration=apigw.EndpointConfiguration(
                                types=[apigw.EndpointType.PRIVATE],
                                vpc_endpoints=[vpc_endpoint]),
                            policy=api_policy)

        # Create a role to allow API GW to access our S3 bucket contents
        role = iam.Role(
            self,
            "Role",
            assumed_by=iam.ServicePrincipal("apigateway.amazonaws.com"))
        role.add_to_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                resources=[
                                    content_bucket.bucket_arn,
                                    content_bucket.bucket_arn + '/*'
                                ],
                                actions=["s3:Get*"]))

        # Create a proxy resource that captures all non-root resource requests
        resource = api.root.add_resource("{proxy+}")
        # Create an integration with S3
        resource_integration = apigw.Integration(
            type=apigw.IntegrationType.AWS,
            integration_http_method='GET',
            options=apigw.IntegrationOptions(
                request_parameters=
                {  # map the proxy parameter so we can pass the request path
                    "integration.request.path.proxy":
                    "method.request.path.proxy"
                },
                integration_responses=[
                    apigw.IntegrationResponse(
                        status_code='200',
                        response_parameters=
                        {  # map the content type of the S3 object back to the HTTP response
                            "method.response.header.Content-Type":
                            "integration.response.header.Content-Type"
                        })
                ],
                credentials_role=role),
            # reference the bucket content we want to retrieve
            uri='arn:aws:apigateway:eu-west-1:s3:path/%s/{proxy}' %
            (content_bucket.bucket_name))
        # handle the GET request and map it to our new integration
        resource.add_method(
            "GET",
            resource_integration,
            method_responses=[
                apigw.MethodResponse(status_code='200',
                                     response_parameters={
                                         "method.response.header.Content-Type":
                                         False
                                     })
            ],
            request_parameters={"method.request.path.proxy": True})
        # Handle requests to the root of our site
        # Create another integration with S3 - this time with no proxy parameter
        resource_integration = apigw.Integration(
            type=apigw.IntegrationType.AWS,
            integration_http_method='GET',
            options=apigw.IntegrationOptions(
                integration_responses=[
                    apigw.IntegrationResponse(
                        status_code='200',
                        response_parameters=
                        {  # map the content type of the S3 object back to the HTTP response
                            "method.response.header.Content-Type":
                            "integration.response.header.Content-Type"
                        })
                ],
                credentials_role=role),
            # reference the bucket content we want to retrieve
            uri='arn:aws:apigateway:eu-west-1:s3:path/%s/index.html' %
            (content_bucket.bucket_name))
        # handle the GET request and map it to our new integration
        api.root.add_method("GET",
                            resource_integration,
                            method_responses=[
                                apigw.MethodResponse(
                                    status_code='200',
                                    response_parameters={
                                        "method.response.header.Content-Type":
                                        False
                                    })
                            ])
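
`VpcPeeringHelper` is a custom construct that is not shown; a hedged sketch of what it plausibly does: create the peering connection and add a route in each subnet of both VPCs pointing at the other VPC's CIDR (`CfnVPCPeeringConnection` and `CfnRoute` are from aws_cdk.aws_ec2):

class VpcPeeringHelper(core.Construct):
    def __init__(self, scope, construct_id, vpc_a, vpc_b):
        super().__init__(scope, construct_id)
        peering = CfnVPCPeeringConnection(self,
                                          'PeeringConnection',
                                          vpc_id=vpc_a.vpc_id,
                                          peer_vpc_id=vpc_b.vpc_id)
        # Route each VPC's subnets to the other VPC's CIDR over the peering link.
        for side, (source, target) in enumerate([(vpc_a, vpc_b), (vpc_b, vpc_a)]):
            for idx, subnet in enumerate(source.public_subnets + source.private_subnets):
                CfnRoute(self,
                         f'PeeringRoute-{side}-{idx}',
                         route_table_id=subnet.route_table.route_table_id,
                         destination_cidr_block=target.vpc_cidr_block,
                         vpc_peering_connection_id=peering.ref)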
Example #23
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        attribute_tagged_group = Group(self, "Flexible Tagged")
        access_project = core.CfnTag(key="access-project", value="elysian")
        access_team = core.CfnTag(key="access-team", value="webdev")
        access_cost_center = core.CfnTag(key="cost-center", value="2600")

        flexible_boundary_policy = CfnManagedPolicy(
            self,
            "FlexiblePermissionBoundary",
            policy_document=json.loads(flexible_policy_permission_boundary),
        )

        CfnUser(
            self,
            "Developer",
            tags=[access_project, access_team, access_cost_center],
            groups=[attribute_tagged_group.group_name],
            permissions_boundary=flexible_boundary_policy.ref,
        )

        # Add AWS managed policy for EC2 Read Only access for the console.
        attribute_tagged_group.add_managed_policy(
            ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name="AmazonEC2ReadOnlyAccess"
            )
        )

        # Import a json policy and create CloudFormation Managed Policy
        CfnManagedPolicy(
            self,
            "FlexibleAttributePolicy",
            policy_document=json.loads(full_attribute_based_policy),
            groups=[attribute_tagged_group.group_name],
        )

        vpc = Vpc.from_lookup(self, "AttributeTaggedVPC", is_default=True)
        instance_type = InstanceType("t2.micro")
        ami = MachineImage.latest_amazon_linux()

        blocked_instance = Instance(
            self,
            "Blocked Instance",
            machine_image=ami,
            instance_type=instance_type,
            vpc=vpc,
        )
        # Re-use the AMI from the blocked instance
        image_id = blocked_instance.instance.image_id

        # Can only add tags to CfnInstance as of cdk v1.31
        valid_instance = CfnInstance(
            self,
            "Valid Instance",
            image_id=image_id,
            instance_type="t2.micro",
            tags=[access_project, access_team, access_cost_center],
        )
        # Empty group as it's not needed to complete our tests.
        test_security_group = SecurityGroup(
            self, "EmptySecurityGroup", vpc=vpc)

        core.CfnOutput(
            self,
            "BlockedInstance",
            value=blocked_instance.instance_id,
            export_name="elysian-blocked-instance",
        )

        core.CfnOutput(
            self,
            "ValidInstance",
            value=valid_instance.ref,
            export_name="elysian-valid-instance",
        )
        core.CfnOutput(
            self,
            "TestSecurityGroup",
            value=test_security_group.security_group_id,
            export_name="test-elysian-sg",
        )
        core.CfnOutput(
            self, "DefaultAMI", value=image_id, export_name="default-elysian-ami"
        )
Example #24
class NetworkTier(Stack):
    """
    The network tier consists of all constructs that are required for the foundational
    networking between the various components of the Deadline render farm.
    """
    @builtins.property  # type: ignore
    @jsii.member(jsii_name="availabilityZones")
    def availability_zones(self) -> typing.List[builtins.str]:
        """
        This overrides the availability zones the Stack will use. The zones that we set here are what
        our VPC will use, so adding local zones to this return value will enable us to then deploy
        infrastructure to them.
        """
        return config.availability_zones_standard + config.availability_zones_local

    def __init__(self, scope: Construct, stack_id: str, **kwargs) -> None:
        """
        Initializes a new instance of NetworkTier
        """
        super().__init__(scope, stack_id, **kwargs)

        # We're creating a SubnetSelection with only the standard availability zones, to be used for
        # the NAT gateway and the VPC interface endpoints, because the local zones do not have
        # these available.
        standard_zone_subnets = SubnetSelection(
            availability_zones=config.availability_zones_standard,
            subnet_type=SubnetType.PUBLIC)

        # The VPC that all components of the render farm will be created in. We are using the
        # `availability_zones` property to override the availability zones that this VPC will use.
        self.vpc = Vpc(self,
                       'Vpc',
                       max_azs=len(self.availability_zones),
                       subnet_configuration=[
                           SubnetConfiguration(name='Public',
                                               subnet_type=SubnetType.PUBLIC,
                                               cidr_mask=28),
                           SubnetConfiguration(name='Private',
                                               subnet_type=SubnetType.PRIVATE,
                                               cidr_mask=18)
                       ],
                       nat_gateway_subnets=standard_zone_subnets)

        # Add interface endpoints
        for idx, service_info in enumerate(_INTERFACE_ENDPOINT_SERVICES):
            service_name = service_info['name']
            service = service_info['service']
            self.vpc.add_interface_endpoint(service_name,
                                            service=service,
                                            subnets=standard_zone_subnets)

        # Add gateway endpoints
        for idx, service_info in enumerate(_GATEWAY_ENDPOINT_SERVICES):
            service_name = service_info['name']
            service = service_info['service']
            self.vpc.add_gateway_endpoint(service_name,
                                          service=service,
                                          subnets=[standard_zone_subnets])

        # Internal DNS zone for the VPC.
        self.dns_zone = PrivateHostedZone(self,
                                          'DnsZone',
                                          vpc=self.vpc,
                                          zone_name='deadline-test.internal')
Example #25
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        username_tagged = Group(self, "Username Tagged")

        developer = User(self, "Developer")
        developer.add_to_group(username_tagged)

        # Add AWS managed policy for EC2 Read Only access for the console.
        username_tagged.add_managed_policy(
            ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name="AmazonEC2ReadOnlyAccess"
            )
        )

        # Import a json policy and create CloudFormation Managed Policy
        CfnManagedPolicy(
            self,
            "UserTaggedPolicy",
            policy_document=json.loads(username_based_policy),
            groups=[username_tagged.group_name],
        )

        vpc = Vpc.from_lookup(self, "UsernameTaggedVPC", is_default=True)
        instance_type = InstanceType("t2.micro")
        ami = MachineImage.latest_amazon_linux()

        blocked_instance = Instance(
            self,
            "Blocked Instance",
            machine_image=ami,
            instance_type=instance_type,
            vpc=vpc,
        )
        # Re-use the AMI from the blocked instance
        image_id = blocked_instance.instance.image_id

        # Can only add tags to CfnInstance as of 1.31
        dev_username_tag = core.CfnTag(
            key="username", value=developer.user_name)
        valid_instance = CfnInstance(
            self,
            "Valid Instance",
            image_id=image_id,
            instance_type="t2.micro",
            tags=[dev_username_tag],
        )
        # Empty group as it's not needed to complete our tests.
        test_security_group = SecurityGroup(
            self, "EmptySecurityGroup", vpc=vpc)

        core.CfnOutput(
            self,
            "BlockedInstance",
            value=blocked_instance.instance_id,
            export_name="username-blocked-instance",
        )

        core.CfnOutput(
            self,
            "ValidInstance",
            value=valid_instance.ref,
            export_name="username-valid-instance",
        )
        core.CfnOutput(
            self,
            "TestSecurityGroup",
            value=test_security_group.security_group_id,
            export_name="test-username-sg",
        )
        core.CfnOutput(
            self, "DefaultAMI", value=image_id, export_name="default-username-ami"
        )
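
`username_based_policy` is imported as a JSON string from another module; a hedged sketch of the kind of attribute-based policy it likely contains, allowing instance actions only when the instance's `username` tag matches the caller:

username_based_policy = """
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": ["ec2:StartInstances", "ec2:StopInstances"],
      "Resource": "arn:aws:ec2:*:*:instance/*",
      "Condition": {
        "StringEquals": {"ec2:ResourceTag/username": "${aws:username}"}
      }
    }
  ]
}
"""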
Example #26
    def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc,
                 region: str) -> None:
        super().__init__(scope, id)

        # create an IAM role to attach to the squid instances
        squid_iam_role = iam.Role(
            self,
            "squid-role",
            assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "CloudWatchAgentServerPolicy"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AmazonEC2RoleforSSM")
            ])

        # Add policy to allow EC2 update instance attributes
        squid_iam_role.add_to_policy(
            statement=iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                          actions=[
                                              'ec2:ModifyInstanceAttribute',
                                          ],
                                          resources=['*']))

        # Create bucket to hold Squid config and whitelist files
        squid_config_bucket = s3.Bucket(
            self, "squid-config", encryption=s3.BucketEncryption.KMS_MANAGED)

        # Upload config and whitelist files to S3 bucket
        s3_deployment.BucketDeployment(
            self,
            "config",
            destination_bucket=squid_config_bucket,
            sources=[
                s3_deployment.Source.asset(
                    path='./squid_app/squid_config_files/config_files_s3')
            ])

        # Provide access to EC2 instance role to read and write to bucket
        squid_config_bucket.grant_read_write(identity=squid_iam_role)

        # Set the AMI to the latest Amazon Linux 2
        amazon_linux_2_ami = ec2.MachineImage.latest_amazon_linux(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=ec2.AmazonLinuxEdition.STANDARD,
            virtualization=ec2.AmazonLinuxVirt.HVM,
            storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE)

        if vpc.public_subnets:
            # Squid ASGs with desired capacity as 1 Instance in each of the AZs
            self.squid_asgs = []
            for count, az in enumerate(vpc.availability_zones, start=1):
                asg = autoscaling.AutoScalingGroup(
                    self,
                    f"asg-{count}",
                    vpc=vpc,
                    instance_type=ec2.InstanceType("t3.nano"),
                    desired_capacity=1,
                    max_capacity=1,
                    min_capacity=1,
                    machine_image=amazon_linux_2_ami,
                    role=squid_iam_role,
                    vpc_subnets=ec2.SubnetSelection(
                        availability_zones=[az],
                        one_per_az=True,
                        subnet_type=ec2.SubnetType.PUBLIC),
                    health_check=autoscaling.HealthCheck.ec2(
                        grace=core.Duration.minutes(5)),
                    resource_signal_count=1,
                    resource_signal_timeout=core.Duration.minutes(10))

                cfn_asg: autoscaling.CfnAutoScalingGroup = asg.node.default_child
                asg_logical_id = cfn_asg.logical_id

                # User data: Required parameters in user data script
                user_data_mappings = {
                    "__S3BUCKET__": squid_config_bucket.bucket_name,
                    "__ASG__": asg_logical_id,
                    "__CW_ASG__": "${aws:AutoScalingGroupName}"
                }
                # Replace parameters with values in the user data
                with open(
                        "./squid_app/squid_config_files/user_data/squid_user_data.sh",
                        'r') as user_data_h:
                    # Use a substitution
                    user_data_sub = core.Fn.sub(user_data_h.read(),
                                                user_data_mappings)

                # Add User data to Launch Config of the autoscaling group
                asg.add_user_data(user_data_sub)

                # Security group attached to the ASG Squid instances
                # Outbound: All allowed
                # Inbound: Allowed from VPC CIDR on ports 80, 443

                asg.connections.allow_from(
                    other=ec2.Peer.ipv4(vpc.vpc_cidr_block),
                    port_range=ec2.Port(protocol=ec2.Protocol.TCP,
                                        string_representation="HTTP from VPC",
                                        from_port=80,
                                        to_port=80))

                asg.connections.allow_from(
                    other=ec2.Peer.ipv4(vpc.vpc_cidr_block),
                    port_range=ec2.Port(protocol=ec2.Protocol.TCP,
                                        string_representation="HTTPS from VPC",
                                        from_port=443,
                                        to_port=443))

                # Create ASG Lifecycle hook to enable updating of route table using Lambda when instance launches and is marked Healthy

                autoscaling.LifecycleHook(
                    self,
                    f"asg-hook-{count}",
                    auto_scaling_group=asg,
                    lifecycle_transition=autoscaling.LifecycleTransition.
                    INSTANCE_LAUNCHING,
                    notification_target=hooktargets.TopicHook(
                        sns.Topic(self,
                                  f"squid-asg-{count}-lifecycle-hook-topic",
                                  display_name=
                                  f"Squid ASG {count} Lifecycle Hook topic")),
                    default_result=autoscaling.DefaultResult.ABANDON,
                    heartbeat_timeout=core.Duration.minutes(5))

                # Tag ASG with the route table IDs used by the isolated and/or private subnets in the availability zone
                # This tag will be used by the Squid Lambda function to identify route tables to update when alarm changes from ALARM to OK

                private_subnets_in_az = []
                isolated_subnets_in_az = []
                route_table_ids = ''

                if vpc.private_subnets:
                    private_subnets_in_az = vpc.select_subnets(
                        availability_zones=[az],
                        subnet_type=ec2.SubnetType.PRIVATE).subnets
                if vpc.isolated_subnets:
                    isolated_subnets_in_az = vpc.select_subnets(
                        availability_zones=[az],
                        subnet_type=ec2.SubnetType.ISOLATED).subnets

                non_public_subnets_in_az = isolated_subnets_in_az + private_subnets_in_az

                # Loop through all non public subnets in AZ to identify route table and create a tag value string
                for subnet in non_public_subnets_in_az:
                    if route_table_ids:
                        route_table_ids = f"{route_table_ids},{subnet.route_table.route_table_id}"
                    else:
                        route_table_ids = subnet.route_table.route_table_id

                # Tag the ASG with route table ids
                core.Tag.add(asg,
                             key='RouteTableIds',
                             value=route_table_ids,
                             apply_to_launched_instances=False)

                self.squid_asgs.append(asg)

        else:
            raise ValueError("No public subnets in VPC")
Example #27
    def __init__(self, scope: core.Construct, id: str,
                 log_bucket: _s3.Bucket,
                 config_table: _dynamodb.Table,
                 tshirt_size: str,
                 sink_bucket: _s3.Bucket,
                 vpc: _ec2.Vpc,
                 **kwargs) -> None:

        super().__init__(scope, id, **kwargs)

        service_role = _iam.Role(
            self, 'BatchEmrServiceRole',
            assumed_by=_iam.ServicePrincipal('elasticmapreduce.amazonaws.com')
        )

        service_role.add_managed_policy(_iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AmazonElasticMapReduceRole'))

        cluster_role = _iam.Role(
            self, 'BatchEmrClusterRole',
            assumed_by=_iam.ServicePrincipal("ec2.amazonaws.com")
        )

        _iam.Policy(
            self, 'BatchEmrClusterPolicy',
            statements=[
                _iam.PolicyStatement(
                    actions=[
                        "glue:CreateDatabase",
                        "glue:UpdateDatabase",
                        "glue:DeleteDatabase",
                        "glue:GetDatabase",
                        "glue:GetDatabases",
                        "glue:CreateTable",
                        "glue:UpdateTable",
                        "glue:DeleteTable",
                        "glue:GetTable",
                        "glue:GetTables",
                        "glue:GetTableVersions",
                        "glue:CreatePartition",
                        "glue:BatchCreatePartition",
                        "glue:UpdatePartition",
                        "glue:DeletePartition",
                        "glue:BatchDeletePartition",
                        "glue:GetPartition",
                        "glue:GetPartitions",
                        "glue:BatchGetPartition",
                        "glue:CreateUserDefinedFunction",
                        "glue:UpdateUserDefinedFunction",
                        "glue:DeleteUserDefinedFunction",
                        "glue:GetUserDefinedFunction",
                        "glue:GetUserDefinedFunctions",
                        "cloudwatch:PutMetricData",
                        "dynamodb:ListTables",
                        "s3:HeadBucket",
                        "ec2:Describe*",
                    ],
                    resources=['*']
                ),
                _iam.PolicyStatement(
                    actions=['s3:GetObject'],
                    resources=[
                        'arn:aws:s3:::' + ARA_BUCKET_NAME + BINARIES + DataGenConfig.DSDGEN_INSTALL_SCRIPT,
                        'arn:aws:s3:::' + ARA_BUCKET_NAME + BINARIES + DataGenConfig.JAR_FILE
                    ]
                ),
                _iam.PolicyStatement(
                    actions=['s3:PutObject'],
                    resources=[log_bucket.bucket_arn + "/data-generator/*"]
                ),
                _iam.PolicyStatement(
                    actions=[
                        "s3:AbortMultipartUpload",
                        "s3:CreateBucket",
                        "s3:DeleteObject",
                        "s3:GetBucketVersioning",
                        "s3:GetObject",
                        "s3:GetObjectTagging",
                        "s3:GetObjectVersion",
                        "s3:ListBucket",
                        "s3:ListBucketMultipartUploads",
                        "s3:ListBucketVersions",
                        "s3:ListMultipartUploadParts",
                        "s3:PutBucketVersioning",
                        "s3:PutObject",
                        "s3:PutObjectTagging"
                    ],
                    resources=[
                        sink_bucket.bucket_arn + '/*',
                        sink_bucket.bucket_arn
                    ]
                )
            ],
            roles=[cluster_role]
        )

        cluster_role.add_managed_policy(_iam.ManagedPolicy.from_aws_managed_policy_name('AmazonSSMManagedInstanceCore'))

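        # The cluster role is attached to the instances through an instance profile; the
        # profile reuses the role's name so the EmrCreateCluster task below can reference
        # it via cluster_role.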
        _iam.CfnInstanceProfile(
            self, 'BatchEmrClusterInstanceProfile',
            roles=[cluster_role.role_name],
            instance_profile_name=cluster_role.role_name
        )

        # Security Groups for the EMR cluster (private subnet)
        # https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-man-sec-groups.html#emr-sg-elasticmapreduce-master-private
        master_sg = _ec2.SecurityGroup(self, 'ElasticMapReduce-Master-Private', vpc=vpc)
        slave_sg = _ec2.SecurityGroup(self, 'ElasticMapReduce-Slave-Private', vpc=vpc)
        service_sg = _ec2.SecurityGroup(self, 'ElasticMapReduce-ServiceAccess', vpc=vpc, allow_all_outbound=False)

        # Service access SG used by the EMR service: inbound 9443 from the master node,
        # outbound 8443 to the master and core nodes
        service_sg.add_ingress_rule(master_sg, _ec2.Port.tcp(9443))
        service_sg.add_egress_rule(master_sg, _ec2.Port.tcp(8443))
        service_sg.add_egress_rule(slave_sg, _ec2.Port.tcp(8443))

        # EMR Master
        master_sg.add_ingress_rule(master_sg, _ec2.Port.all_icmp())
        master_sg.add_ingress_rule(master_sg, _ec2.Port.all_tcp())
        master_sg.add_ingress_rule(master_sg, _ec2.Port.all_udp())
        master_sg.add_ingress_rule(slave_sg, _ec2.Port.all_icmp())
        master_sg.add_ingress_rule(slave_sg, _ec2.Port.all_tcp())
        master_sg.add_ingress_rule(slave_sg, _ec2.Port.all_udp())
        master_sg.add_ingress_rule(service_sg, _ec2.Port.tcp(8443))

        # EMR Slave
        slave_sg.add_ingress_rule(master_sg, _ec2.Port.all_icmp())
        slave_sg.add_ingress_rule(master_sg, _ec2.Port.all_tcp())
        slave_sg.add_ingress_rule(master_sg, _ec2.Port.all_udp())
        slave_sg.add_ingress_rule(slave_sg, _ec2.Port.all_icmp())
        slave_sg.add_ingress_rule(slave_sg, _ec2.Port.all_tcp())
        slave_sg.add_ingress_rule(slave_sg, _ec2.Port.all_udp())
        slave_sg.add_ingress_rule(service_sg, _ec2.Port.tcp(8443))

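        # Configuration Lambda, inlined from a local source file; it reads and writes the
        # data-generation parameters in the DynamoDB config table.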
        with open('common/common_cdk/lambda/datagen_config.py', 'r') as f:
            lambda_source = f.read()

        configure_datagen_function = _lambda.SingletonFunction(
            self, 'BatchConfigureDatagenLambda',
            uuid="58a9a222-ff07-11ea-adc1-0242ac120002",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.inline(lambda_source),
            handler='index.handler',
            function_name='datagen-config',
            environment={
                'TABLE_NAME': config_table.table_name,
                'JAR_LOCATION': BINARIES_LOCATION + DataGenConfig.JAR_FILE,
            },
            timeout=core.Duration.seconds(10)
        )

        configure_datagen_function.role.add_to_policy(
            _iam.PolicyStatement(
                actions=[
                    'dynamodb:GetItem',
                    'dynamodb:PutItem',
                ],
                resources=[config_table.table_arn]
            )
        )

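        # Cluster termination tasks: one for the normal end of the workflow and one used
        # as the error handler, which fails the execution afterwards.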
        terminate_cluster = _sfn_tasks.EmrTerminateCluster(
            self, 'BatchDeleteCluster',
            cluster_id=_sfn.TaskInput.from_data_at("$.Emr.Cluster.Id").value,
            integration_pattern=_sfn.IntegrationPattern.RUN_JOB,
        )

        terminate_cluster_error = _sfn_tasks.EmrTerminateCluster(
            self, 'BatchDeleteClusterError',
            cluster_id=_sfn.TaskInput.from_data_at("$.Emr.Cluster.Id").value,
            integration_pattern=_sfn.IntegrationPattern.RUN_JOB,
        ).next(_sfn.Fail(self, 'StepFailure'))

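        # Transient EMR cluster (Spark and Hadoop) built from spot instance fleets; a
        # bootstrap action installs the dsdgen binaries on the nodes.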
        create_cluster = _sfn_tasks.EmrCreateCluster(
            self, "BatchCreateEMRCluster",
            name="BatchDatagenCluster",
            result_path="$.Emr",
            release_label='emr-5.30.1',
            log_uri=log_bucket.s3_url_for_object() + "/data-generator",
            cluster_role=cluster_role,
            service_role=service_role,
            bootstrap_actions=[
                _sfn_tasks.EmrCreateCluster.BootstrapActionConfigProperty(
                    name="dsdgen-install",
                    script_bootstrap_action=_sfn_tasks.EmrCreateCluster.ScriptBootstrapActionConfigProperty(
                        path=BINARIES_LOCATION + DataGenConfig.DSDGEN_INSTALL_SCRIPT,
                    )
                )
            ],
            applications=[
                _sfn_tasks.EmrCreateCluster.ApplicationConfigProperty(
                    name="spark"
                ),
                _sfn_tasks.EmrCreateCluster.ApplicationConfigProperty(
                    name="hadoop"
                )
            ],
            instances=_sfn_tasks.EmrCreateCluster.InstancesConfigProperty(
                emr_managed_master_security_group=master_sg.security_group_id,
                emr_managed_slave_security_group=slave_sg.security_group_id,
                service_access_security_group=service_sg.security_group_id,
                ec2_subnet_ids=vpc.select_subnets().subnet_ids,
                instance_fleets=[
                    _sfn_tasks.EmrCreateCluster.InstanceFleetConfigProperty(
                        instance_fleet_type=_sfn_tasks.EmrCreateCluster.InstanceRoleType.MASTER,
                        instance_type_configs=[
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m5.xlarge',
                                weighted_capacity=1
                            ),
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m5a.xlarge',
                                weighted_capacity=1
                            ),
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m4.xlarge',
                                weighted_capacity=1
                            ),
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m5d.xlarge',
                                weighted_capacity=1
                            ),
                        ],
                        launch_specifications=_sfn_tasks.EmrCreateCluster.InstanceFleetProvisioningSpecificationsProperty(
                            spot_specification=_sfn_tasks.EmrCreateCluster.SpotProvisioningSpecificationProperty(
                                timeout_action=_sfn_tasks.EmrCreateCluster.SpotTimeoutAction.SWITCH_TO_ON_DEMAND,
                                timeout_duration_minutes=5
                            )
                        ),
                        target_on_demand_capacity=0,
                        target_spot_capacity=1
                    ),
                    _sfn_tasks.EmrCreateCluster.InstanceFleetConfigProperty(
                        instance_fleet_type=_sfn_tasks.EmrCreateCluster.InstanceRoleType.CORE,
                        instance_type_configs=[
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m5.xlarge',
                                weighted_capacity=1
                            ),
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m5.2xlarge',
                                weighted_capacity=2
                            ),
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m5a.xlarge',
                                weighted_capacity=1
                            ),
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m5a.2xlarge',
                                weighted_capacity=2
                            ),
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m4.xlarge',
                                weighted_capacity=1
                            )
                        ],
                        launch_specifications=_sfn_tasks.EmrCreateCluster.InstanceFleetProvisioningSpecificationsProperty(
                            spot_specification=_sfn_tasks.EmrCreateCluster.SpotProvisioningSpecificationProperty(
                                timeout_action=_sfn_tasks.EmrCreateCluster.SpotTimeoutAction.SWITCH_TO_ON_DEMAND,
                                timeout_duration_minutes=5
                            )
                        ),
                        target_on_demand_capacity=0,
                        target_spot_capacity=DataGenConfig.BATCH_CLUSTER_SIZE[tshirt_size]
                    )
                ]
            )
        ).add_catch(handler=terminate_cluster_error, result_path="$.error")

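        # Lambda task that resolves the dsdgen parameters (parallelism, data size, sink
        # bucket) for this run; its output is stored under $.Config.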
        configure_datagen = _sfn_tasks.LambdaInvoke(
            self, "BatchConfigureDatagenTask",
            lambda_function=configure_datagen_function,
            payload=_sfn.TaskInput.from_text('{'
                                             '"Param": "batch_iterator",'
                                             '"Module": "batch",'
                                             '"SinkBucket": "'+sink_bucket.s3_url_for_object()+'",'
                                             '"Parallelism": "'+str(int(DataGenConfig.BATCH_DATA_SIZE[tshirt_size])*2)+'",'
                                             '"DataSize": "'+DataGenConfig.BATCH_DATA_SIZE[tshirt_size]+'",'
                                             '"TmpBucket": "fake-bucket"'
                                             '}'),
            result_path='$.Config'
        ).add_catch(handler=terminate_cluster_error, result_path="$.error")

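        # The dsdgen step is added through a CustomState so that the step arguments can be
        # taken from the state input ("Args.$": "$.Config.Payload.StepParam").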
        add_datagen_step = _sfn.CustomState(
            self, 'BatchAddDataGenStep',
            state_json={
                "Type": "Task",
                "Resource": "arn:aws:states:::elasticmapreduce:addStep.sync",
                "Parameters": {
                    "ClusterId.$": "$.Emr.Cluster.Id",
                    "Step": {
                        "Name": "DatagenStep",
                        "ActionOnFailure": "CONTINUE",
                        "HadoopJarStep": {
                            "Jar": "command-runner.jar",
                            "Args.$": "$.Config.Payload.StepParam"
                        }
                    }
                },
                "ResultPath": "$.Step",
                "Next": "BatchUpdateIterator",
                "Catch": [
                    {
                        "ErrorEquals": ["States.ALL"],
                        "Next": "BatchDeleteClusterError",
                        "ResultPath": "$.error"
                    }
                ]
            }
        )

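        # Increment (or initialize) the batch iterator in the DynamoDB config table.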
        update_iterator = _sfn_tasks.DynamoUpdateItem(
            self, 'BatchUpdateIterator',
            table=config_table,
            key={
                'param': _sfn_tasks.DynamoAttributeValue.from_string('batch_iterator')
            },
            update_expression='SET iterator = if_not_exists(iterator, :start) + :inc',
            expression_attribute_values={
                ":inc": _sfn_tasks.DynamoAttributeValue.from_number(1),
                ":start": _sfn_tasks.DynamoAttributeValue.from_number(0)
            },
            result_path=_sfn.JsonPath.DISCARD
        )

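        # Workflow: configure datagen -> create cluster -> run dsdgen step -> update
        # iterator -> terminate cluster.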
        definition = configure_datagen \
            .next(create_cluster) \
            .next(add_datagen_step) \
            .next(update_iterator) \
            .next(terminate_cluster)

        datagen_stepfunctions = _sfn.StateMachine(
            self, "BatchDataGenStepFunctions",
            definition=definition,
            timeout=core.Duration.minutes(30)
        )

        datagen_stepfunctions.add_to_role_policy(
            _iam.PolicyStatement(
                actions=[
                    'elasticmapreduce:AddJobFlowSteps',
                    'elasticmapreduce:DescribeStep'
                ],
                resources=['*']
            )
        )
        datagen_stepfunctions.add_to_role_policy(
            _iam.PolicyStatement(
                actions=[
                    "iam:CreateServiceLinkedRole",
                    "iam:PutRolePolicy"
                ],
                resources=["arn:aws:iam::*:role/aws-service-role/elasticmapreduce.amazonaws.com*/AWSServiceRoleForEMRCleanup*"],
                conditions={
                    "StringLike": {
                        "iam:AWSServiceName": [
                            "elasticmapreduce.amazonaws.com",
                            "elasticmapreduce.amazonaws.com.cn"
                        ]
                    }
                }
            )
        )

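        # Run the data-generation workflow every 30 minutes via an EventBridge rule.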
        step_trigger = _events.Rule(
            self, 'BatchSteptrigger',
            schedule=_events.Schedule.cron(minute='0/30',
                                           hour='*',
                                           month='*',
                                           week_day='*',
                                           year='*')
        )

        step_trigger.add_target(_events_targets.SfnStateMachine(machine=datagen_stepfunctions))

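        # Custom resource backed by a Lambda that starts an execution of the state machine
        # when the stack is deployed.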
        with open('common/common_cdk/lambda/stepfunctions_trigger.py', 'r') as f:
            lambda_source = f.read()

        stepfunctions_trigger_lambda = _lambda.SingletonFunction(
            self, 'BatchStepFunctionsTriggerLambda',
            uuid="9597f6f2-f840-11ea-adc1-0242ac120002",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.inline(lambda_source),
            handler='index.handler',
            function_name='stepfunctions-batch-datagen-trigger'
        )

        stepfunctions_trigger_lambda.role.add_to_policy(
            _iam.PolicyStatement(
                actions=["states:StartExecution"],
                resources=['*']
            )
        )

        trigger_step_lambda_provider = _custom_resources.Provider(
            self, 'StepFunctionsTriggerLambdaProvider',
            on_event_handler=stepfunctions_trigger_lambda
        )

        core.CustomResource(
            self, 'StepFunctionsTrigger',
            service_token=trigger_step_lambda_provider.service_token,
            properties={
                "stepArn": datagen_stepfunctions.state_machine_arn
            }
        )

        # Clean-up: a Lambda-backed custom resource that can terminate running EMR clusters
        # and stop executions of the batch state machine (typically on stack deletion)
        with open('common/common_cdk/lambda/stepfunctions_terminate_emr.py', 'r') as f:
            lambda_source = f.read()

        sfn_terminate = _lambda.SingletonFunction(
            self, 'StepFuncTerminateBatch',
            uuid='58a9a422-ff07-11ea-adc1-0242ac120002',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.inline(lambda_source),
            handler='index.handler',
            timeout=core.Duration.minutes(5)
        )

        sfn_terminate.role.add_to_policy(
            _iam.PolicyStatement(
                actions=[
                    'elasticmapreduce:ListClusters',
                    'elasticmapreduce:TerminateJobFlows',
                    'states:ListStateMachines',
                    'states:ListExecutions',
                    'states:StopExecution'
                ],
                resources=['*']
            )
        )

        sfn_terminate_provider = _custom_resources.Provider(
            self, 'StepFuncTerminateBatchLambdaProvider',
            on_event_handler=sfn_terminate
        )

        core.CustomResource(
            self, 'StepFuncTerminateBatchCustomResource',
            service_token=sfn_terminate_provider.service_token,
            properties={
                "state_machine": 'BatchDatagen'
            })
Beispiel #28
0
    def __init__(self, scope: Construct, id: str, deploy_env: str, config: dict, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        self.config = config
        self.deploy_env = deploy_env
        self.vpc = Vpc(self, f"AirflowVPC-{deploy_env}", cidr="10.0.0.0/16", max_azs=config["max_vpc_azs"])
Beispiel #29
0
    def __init__(self, scope: Construct, stack_id: str, **kwargs) -> None:
        """
        Initializes a new instance of NetworkTier
        :param scope: The scope of this construct.
        :param stack_id: The ID of this construct.
        :param kwargs: The stack properties.
        """
        super().__init__(scope, stack_id, **kwargs)

        # The VPC that all components of the render farm will be created in.
        self.vpc = Vpc(
            self,
            'Vpc',
            max_azs=2,
            subnet_configuration=[
                SubnetConfiguration(
                    name='Public',
                    subnet_type=SubnetType.PUBLIC,
                    cidr_mask=28
                ),
                SubnetConfiguration(
                    name='Private',
                    subnet_type=SubnetType.PRIVATE,
                    cidr_mask=18  # 16,382 IP addresses
                )
            ]
        )
        # VPC flow logs are a security best-practice as they allow us
        # to capture information about the traffic going in and out of
        # the VPC. For more information, see the README for this app.
        self.vpc.add_flow_log(
            'NetworkTierFlowLogs',
            destination=FlowLogDestination.to_cloud_watch_logs(),
            traffic_type=FlowLogTrafficType.ALL
        )

        # TODO - Create a NetworkAcl for your VPC that only allows
        # network traffic required for your render farm. This is a
        # security best-practice to ensure the safety of your farm.
        # The default network ACLs allow all traffic by default,
        # whereas custom network ACLs deny all traffic by default.
        # For more information, see the README for this app.
        #
        # Example code to create a custom network ACL:
        # acl = NetworkAcl(
        #     self,
        #     'ACL',
        #     vpc=self.vpc,
        #     subnet_selection=SubnetSelection(
        #         subnets=self.vpc.public_subnets
        #     )
        # )
        #
        # You can optionally add rules to allow traffic (e.g. SSH):
        # acl.add_entry(
        #     'SSH',
        #     cidr=AclCidr.ipv4(
        #         # some-ipv4-address-cidr
        #     ),
        #     traffic=AclTraffic.tcp_port(22),
        #     rule_number=1
        # )
        endpoint_subnets = SubnetSelection(subnet_type=SubnetType.PRIVATE)

        # Add interface endpoints
        for idx, service_info in enumerate(_INTERFACE_ENDPOINT_SERVICES):
            service_name = service_info['name']
            service = service_info['service']
            self.vpc.add_interface_endpoint(
                f'{service_name}{idx}',
                service=service,
                subnets=endpoint_subnets
            )

        # Add gateway endpoints
        for idx, service_info in enumerate(_GATEWAY_ENDPOINT_SERVICES):
            service_name = service_info['name']
            service = service_info['service']
            self.vpc.add_gateway_endpoint(
                service_name,
                service=service,
                subnets=[endpoint_subnets]
            )

        # Internal DNS zone for the VPC.
        self.dns_zone = PrivateHostedZone(
            self,
            'DnsZone',
            vpc=self.vpc,
            zone_name='deadline-test.internal'
        )

    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here

        vpc = Vpc(self, "MyVpc", max_azs=2)

        ecs_cluster = Cluster(self, 'FagateCluster', vpc=vpc)

        alb = ApplicationLoadBalancer(self,
                                      'EcsLb',
                                      vpc=vpc,
                                      internet_facing=True)

        listener = alb.add_listener('EcsListener', port=80)

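        # Default rule: return a fixed 404 for requests that match no target group; the
        # override below sets the same fixed response on the underlying CfnListener.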
        listener.add_fixed_response('Default-Fix', status_code='404')
        listener.node.default_child.default_action = [{
            "type": "fixed-response",
            "fixedResponseConfig": {
                "statusCode": "404"
            }
        }]

        website_bucket = Bucket(self,
                                'PetclinicWebsite',
                                website_index_document='index.html',
                                public_read_access=True,
                                removal_policy=core.RemovalPolicy.DESTROY)

        deployment = BucketDeployment(
            self,
            'PetclinicDeployWebsite',
            sources=[Source.asset('./spring-petclinic-static')],
            destination_bucket=website_bucket,
            retain_on_delete=False
            #destination_key_prefix='web/static'
        )

        # Modify the website's config.js through a CloudFormation custom resource
        modify_policy = [
            PolicyStatement(actions=[
                "s3:PutObject", "s3:PutObjectAcl", "s3:PutObjectVersionAcl",
                "s3:GetObject"
            ],
                            effect=Effect.ALLOW,
                            resources=[website_bucket.bucket_arn + "/*"]),
            PolicyStatement(actions=["s3:ListBucket"],
                            effect=Effect.ALLOW,
                            resources=[website_bucket.bucket_arn]),
            PolicyStatement(actions=["dynamodb:*"],
                            effect=Effect.ALLOW,
                            resources=[
                                "arn:aws:dynamodb:" + self.region + ":" +
                                self.account + ":*"
                            ])
        ]

        with open("custom-resource-code/init.py", encoding="utf-8") as fp:
            code_body = fp.read()

        dynamodb_tables = []

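        # One DynamoDB table, container image, Fargate task definition/service and ALB
        # routing rule per microservice.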
        for s in ['customers', 'vets', 'visits']:
            table = Table(
                self,
                s.capitalize() + 'Table',
                partition_key={
                    'name': 'id',
                    'type': AttributeType.STRING
                },
                removal_policy=core.RemovalPolicy.DESTROY,
                read_capacity=5,
                write_capacity=5,
            )

            dynamodb_tables.append(table.table_name)

            asset = DockerImageAsset(
                self,
                'spring-petclinic-' + s,
                repository_name=self.stack_name + '-' + s,
                directory='./spring-petclinic-serverless/spring-petclinic-' +
                s + '-serverless',
                build_args={
                    'JAR_FILE':
                    'spring-petclinic-' + s + '-serverless-2.0.7.jar'
                })

            ecs_task = FargateTaskDefinition(self,
                                             'TaskDef-Fargate-' + s,
                                             memory_limit_mib=512,
                                             cpu=256)

            ecs_task.add_to_task_role_policy(
                PolicyStatement(actions=["dynamodb:*"],
                                effect=Effect.ALLOW,
                                resources=[table.table_arn]))

            ecs_task.add_to_task_role_policy(
                PolicyStatement(actions=['xray:*'],
                                effect=Effect.ALLOW,
                                resources=['*']))

            env = {
                'DYNAMODB_TABLE_NAME': table.table_name,
                'SERVER_SERVLET_CONTEXT_PATH': '/api/' + s.rstrip('s')
            }

            ecs_container = ecs_task.add_container(
                'Container-' + s,
                image=ContainerImage.from_docker_image_asset(asset),
                logging=LogDriver.aws_logs(stream_prefix=s),
                environment=env)

            ecs_container.add_port_mappings(PortMapping(container_port=8080))

            # Sidecar container running the X-Ray daemon
            ecs_sidecar_container = ecs_task.add_container(
                'Sidecar-Xray-' + s,
                image=ContainerImage.from_registry('amazon/aws-xray-daemon'))

            ecs_sidecar_container.add_port_mappings(
                PortMapping(container_port=2000, protocol=Protocol.UDP))

            ecs_service = FargateService(self,
                                         'FargateService-' + s,
                                         cluster=ecs_cluster,
                                         service_name='spring-petclinic-' + s,
                                         desired_count=2,
                                         task_definition=ecs_task)

            pattern = '/api/' + s.rstrip('s') + '/*'
            priority = randint(1, 10) * len(s)
            check = HealthCheck(
                path='/api/' + s.rstrip('s') + '/manage',
                healthy_threshold_count=2,
                unhealthy_threshold_count=3,
            )

            target = listener.add_targets('ECS-' + s,
                                          path_pattern=pattern,
                                          priority=priority,
                                          port=80,
                                          targets=[ecs_service],
                                          health_check=check)

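        # Custom resource that runs the inline Lambda to update config.js in the website
        # bucket with the ALB URL and the DynamoDB table names.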
        resource = CustomResource(
            self,
            "S3ModifyCustomResource",
            provider=CustomResourceProvider.lambda_(
                SingletonFunction(self,
                                  "CustomResourceSingleton",
                                  uuid="f7d4f730-4ee1-11e8-9c2d-fa7ae01bbebc",
                                  code=InlineCode(code_body),
                                  handler="index.handler",
                                  timeout=core.Duration.seconds(300),
                                  runtime=Runtime.PYTHON_3_7,
                                  initial_policy=modify_policy)),
            properties={
                "Bucket": website_bucket.bucket_name,
                "InvokeUrl": 'http://' + alb.load_balancer_dns_name + '/',
                "DynamoDBTables": dynamodb_tables
            })

        core.CfnOutput(self,
                       "FagateALBUrl",
                       export_name="FagateALBUrl",
                       value=alb.load_balancer_dns_name)
        core.CfnOutput(self,
                       "FagatePetclinicWebsiteUrl",
                       export_name="FagatePetclinicWebsiteUrl",
                       value=website_bucket.bucket_website_url)