def __init__(self, scope: core.Construct, id: str, add_nat_to_vpc: bool = True, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # The code that defines your stack goes here
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/Vpc.html
    app_configs = self.node.try_get_context('envs')
    env_configs = app_configs['dev']

    if add_nat_to_vpc:
        # Use a cheap NAT instance instead of a managed NAT gateway
        nat_gw_provider = _ec2.NatProvider.instance(
            instance_type=_ec2.InstanceType('t2.micro'))
        self.custom_vpc = _ec2.Vpc(
            self,
            "customVpcId",
            cidr=env_configs['vpc_config']['cidr'],
            max_azs=2,
            nat_gateway_provider=nat_gw_provider,
            nat_gateways=1,
            subnet_configuration=[
                _ec2.SubnetConfiguration(
                    name="publicSubnet",
                    cidr_mask=env_configs['vpc_config']['cidr_mask'],
                    subnet_type=_ec2.SubnetType.PUBLIC),
                _ec2.SubnetConfiguration(
                    name="appSubnet",
                    cidr_mask=env_configs['vpc_config']['cidr_mask'],
                    subnet_type=_ec2.SubnetType.PRIVATE),
                _ec2.SubnetConfiguration(
                    name="dbSubnet",
                    cidr_mask=env_configs['vpc_config']['cidr_mask'],
                    subnet_type=_ec2.SubnetType.ISOLATED)
            ])
    else:
        self.custom_vpc = _ec2.Vpc(
            self,
            "customVpcId",
            cidr=env_configs['vpc_config']['cidr'],
            max_azs=2,
            nat_gateways=0,
            subnet_configuration=[
                _ec2.SubnetConfiguration(
                    name="frontEndSubnet",
                    cidr_mask=env_configs['vpc_config']['cidr_mask'],
                    subnet_type=_ec2.SubnetType.PUBLIC),
                _ec2.SubnetConfiguration(
                    name="backEndSubnet",
                    cidr_mask=env_configs['vpc_config']['cidr_mask'],
                    subnet_type=_ec2.SubnetType.ISOLATED)
            ])

    output_1 = core.CfnOutput(
        self,
        "CustomVpcId",
        value=self.custom_vpc.vpc_id,
        description="Custom VPC id; the subnet layout depends on add_nat_to_vpc")
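
# A minimal sketch of the 'envs' context the stack above reads; the key names
# match the try_get_context() lookups, but the CIDR values are illustrative
# assumptions, not from the source. Context like this normally lives in
# cdk.json, but it can also be passed to the App directly:
app = core.App(context={
    'envs': {
        'dev': {
            'vpc_config': {'cidr': '10.111.0.0/16', 'cidr_mask': 24}
        }
    }
})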
def __init__(self, scope: core.Construct, id: str, vpc_nat_gateways, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)
    self.vpc_nat_gateways = vpc_nat_gateways
    self.vpc = None
    if self.vpc_nat_gateways is None:
        # Fall back to the Vpc construct's default NAT gateway count
        self.vpc = aws_ec2.Vpc(self, "ClusterVPC", cidr="10.0.0.0/16")
    else:
        self.vpc = aws_ec2.Vpc(
            self,
            "ClusterVPC",
            cidr="10.0.0.0/16",
            nat_gateways=self.vpc_nat_gateways,
        )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id, *kwargs) # Create a cluster vpc = ec2.Vpc(self, "Vpc", max_a_zs=2) cluster = ecs.Cluster(self, 'fargate-service-autoscaling', vpc=vpc) # Create Fargate Service fargate_service = ecs_patterns.LoadBalancedFargateService( self, "sample-app", cluster=cluster, image=ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample")) # Setup AutoScaling policy scaling = fargate_service.service.auto_scale_task_count(max_capacity=2) scaling.scale_on_cpu_utilization( "CpuScaling", target_utilization_percent=50, scale_in_cooldown=core.Duration.seconds(60), scale_out_cooldown=core.Duration.seconds(60), ) core.CfnOutput( self, "LoadBalancerDNS", value=fargate_service.load_balancer.load_balancer_dns_name)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) self.vpc = _ec2.Vpc( self, "miztVpc", cidr="10.10.0.0/16", max_azs=2, nat_gateways=0, subnet_configuration=[ _ec2.SubnetConfiguration(name="public", cidr_mask=24, subnet_type=_ec2.SubnetType.PUBLIC), # _ec2.SubnetConfiguration( # name="app", cidr_mask=24, subnet_type=_ec2.SubnetType.PRIVATE # ), _ec2.SubnetConfiguration(name="db", cidr_mask=24, subnet_type=_ec2.SubnetType.ISOLATED) ]) output_0 = core.CfnOutput( self, "AutomationFrom", value=f"{global_args.SOURCE_INFO}", description= "To know more about this automation stack, check out our github page." ) output_1 = core.CfnOutput(self, "VpcId", value=self.vpc.vpc_id, export_name="VpcId")
def __init__(self, scope: core.Construct, id: str, **kwargs):
    super().__init__(scope, id, **kwargs)

    self.vpc = ec2.Vpc(
        self,
        "BaseVPC",
        cidr='10.0.0.0/24',
        enable_dns_support=True,
        enable_dns_hostnames=True,
    )

    self.services_3000_sec_group = ec2.SecurityGroup(
        self,
        "FrontendToBackendSecurityGroup",
        allow_all_outbound=True,
        description="Security group for frontend service to talk to backend services",
        vpc=self.vpc)

    # Allow members of this security group to reach each other on port 3000
    self.sec_grp_ingress_self_3000 = ec2.CfnSecurityGroupIngress(
        self,
        "InboundSecGrp3000",
        ip_protocol='tcp',
        source_security_group_id=self.services_3000_sec_group.security_group_id,
        from_port=3000,
        to_port=3000,
        group_id=self.services_3000_sec_group.security_group_id)
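
# A hedged alternative to the low-level CfnSecurityGroupIngress above (not in
# the source): the SecurityGroup construct's connections API can express the
# same "group may reach itself on port 3000" rule in a single call.
self.services_3000_sec_group.connections.allow_from(
    self.services_3000_sec_group,  # peer is the same group
    ec2.Port.tcp(3000),
    'Frontend to backend on 3000')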
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) vpc = ec2.Vpc(self, "CDKFargateVpc", max_azs=2) cluster = ecs.Cluster(self, "CDKFargateCluster", vpc=vpc) role = iam.Role.from_role_arn(self, "CDKFargateECSTaskRole", ROLE_ARN) image = ecs.ContainerImage.from_registry(ECR_REGISOTRY) task_definition = ecs.FargateTaskDefinition(scope=self, id="CDKFargateECSTask", execution_role=role, task_role=role) port_mapping = ecs.PortMapping(container_port=8080, host_port=8080) task_definition.add_container( id="CDKFargateContainer", image=image).add_port_mappings(port_mapping) fargate_service = ecs_patterns.ApplicationLoadBalancedFargateService( self, "CDKFargateService", cluster=cluster, task_definition=task_definition, ) core.CfnOutput( self, "CDKFargateLoadBalancerDNS", value=fargate_service.load_balancer.load_balancer_dns_name, )
def __init__(self, scope: Construct, stack_id: str, env_name: str) -> None:
    super().__init__(scope, stack_id)

    ############################################################################################
    # ### NETWORKING ###########################################################################
    ############################################################################################

    # create new VPC
    aws_ec2.Vpc(
        self,
        "geostore",
        # cidr='10.0.0.0/16',  # TODO: use specific CIDR
        subnet_configuration=[
            aws_ec2.SubnetConfiguration(
                cidr_mask=27, name="public", subnet_type=aws_ec2.SubnetType.PUBLIC
            ),
            aws_ec2.SubnetConfiguration(
                cidr_mask=20, name="ecs-cluster", subnet_type=aws_ec2.SubnetType.PRIVATE
            ),
            aws_ec2.SubnetConfiguration(
                name="reserved",
                subnet_type=aws_ec2.SubnetType.PRIVATE,
                reserved=True,
            ),
        ],
        # 99 is clamped to however many AZs the region actually offers
        max_azs=99 if env_name == PRODUCTION_ENVIRONMENT_NAME else 1,
    )

    Tags.of(self).add("ApplicationLayer", "networking")  # type: ignore[arg-type]
def __init__(self, scope: core.Construct, id: str, vpc_cidr: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    self._vpc = ec2.Vpc(
        self,
        id,
        cidr=vpc_cidr,
        enable_dns_hostnames=True,
        enable_dns_support=True,
        max_azs=2,
        subnet_configuration=[
            ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PUBLIC,
                                    name="BASTION",
                                    cidr_mask=24),
            ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PRIVATE,
                                    name="ECS",
                                    cidr_mask=24),
            ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PUBLIC,
                                    name="DBS",
                                    cidr_mask=24)
        ],
        nat_gateway_provider=ec2.NatProvider.gateway(),
        nat_gateway_subnets=ec2.SubnetSelection(one_per_az=True,
                                                subnet_group_name="BASTION"),
        gateway_endpoints={
            's3':
            ec2.GatewayVpcEndpointOptions(
                service=ec2.GatewayVpcEndpointAwsService.S3,
                subnets=[
                    ec2.SubnetSelection(one_per_az=True,
                                        subnet_type=ec2.SubnetType.PUBLIC)
                ])
        })
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # Create VPC and Fargate Cluster
    # NOTE: Limit AZs to avoid reaching resource quotas
    vpc = ec2.Vpc(self, "MyVpc", max_azs=2)

    cluster = ecs.Cluster(self, 'Ec2Cluster', vpc=vpc)

    fargate_service = ecs_patterns.LoadBalancedFargateService(
        self,
        "FargateService",
        cluster=cluster,
        image=ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample")
    )

    core.CfnOutput(
        self,
        "LoadBalancerDNS",
        value=fargate_service.load_balancer.load_balancer_dns_name
    )
def __init__(self, app: cdk.App, id: str, **kwargs) -> None:
    super().__init__(app, id)

    # ip_network comes from the stdlib: from ipaddress import ip_network
    vpc_cidr = ip_network('10.0.0.0/16')

    # create the VPC
    self._vpc = ec2.Vpc(self,
                        id,
                        cidr=str(vpc_cidr),
                        max_azs=2,
                        nat_gateways=2)
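
# A quick stdlib sketch of why ip_network is handy here: the same object can
# also derive per-subnet CIDRs from the VPC range (values illustrative).
from ipaddress import ip_network

vpc_cidr = ip_network('10.0.0.0/16')
subnet_cidrs = list(vpc_cidr.subnets(new_prefix=24))[:4]  # first four /24s
print([str(c) for c in subnet_cidrs])  # ['10.0.0.0/24', '10.0.1.0/24', ...]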
def __init__(self, app: core.App, id: str, **kwargs) -> None:
    super().__init__(app, id, **kwargs)

    vpc = ec2.Vpc(self, "VPC")

    # Security group for our test instance
    my_sg = ec2.SecurityGroup(self,
                              "my_sg",
                              vpc=vpc,
                              description="My sg for testing",
                              allow_all_outbound=True)

    # Add ssh from anywhere
    my_sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(22),
                           "Allow ssh access from anywhere")

    asg = autoscaling.AutoScalingGroup(
        self,
        "ASG",
        vpc=vpc,
        instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2,
                                          ec2.InstanceSize.MICRO),
        machine_image=ec2.AmazonLinuxImage(),
    )
    asg.add_security_group(my_sg)  # add our security group, expects object

    ## Classic Elastic Load Balancer
    #lb = elb.LoadBalancer(
    #    self, "ELB",
    #    vpc=vpc,
    #    internet_facing=True,
    #    health_check={"port": 22}
    #)
    #lb.add_target(asg)
    #
    #listener = lb.add_listener(
    #    external_port=8000,
    #    external_protocol=elb.LoadBalancingProtocol.TCP,
    #    internal_port=22,
    #    internal_protocol=elb.LoadBalancingProtocol.TCP
    #)
    #listener.connections.allow_default_port_from_any_ipv4("Open to the world")

    # Network Load Balancer
    nlb = elbv2.NetworkLoadBalancer(
        self,
        "NLB",
        vpc=vpc,
        internet_facing=True,
        cross_zone_enabled=True,
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC))

    my_target = elbv2.NetworkTargetGroup(self,
                                         "MyTargetGroup",
                                         port=22,
                                         vpc=vpc)

    listener = nlb.add_listener("Listener",
                                port=8000,
                                default_target_groups=[my_target])

    my_target.add_target(asg)
def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    website_url = props.get('WebsiteUrl')

    # The VPC where everything will live
    vpc = ec2.Vpc(
        self,
        'GhostVpc',
        max_azs=1,
        subnet_configuration=[
            ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PUBLIC,
                                    name='GhostVpcPublicSubnet')
        ],
        cidr='10.0.0.0/16',
    )

    # Create an auto scaling group to be used by the ECS cluster
    asg = self.create_asg(vpc)

    # Create the ECS cluster our Ghost website will be in
    cluster = self.create_ecs_cluster(vpc, asg)

    # Create the Ghost ECS service
    ghost_service = self.create_ghost_ecs_service(cluster, website_url)

    # Export the resources so other stacks can reference them
    self.output_props = {
        'Asg': asg,
        'Vpc': vpc,
        'Cluster': cluster,
        'EcsService': ghost_service,
    }
def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    # The code that defines your stack goes here
    vpc = aws_ec2.Vpc(
        self,
        "DynamodbVPC",
        max_azs=2,
        gateway_endpoints={
            "DynamoDB":
            aws_ec2.GatewayVpcEndpointOptions(
                service=aws_ec2.GatewayVpcEndpointAwsService.DYNAMODB)
        })

    #XXX: Another way to add DynamoDB VPC Endpoint
    #dynamo_db_endpoint = vpc.add_gateway_endpoint("DynamoDB",
    #    service=aws_ec2.GatewayVpcEndpointAwsService.DYNAMODB
    #)

    ddb_table = aws_dynamodb.Table(
        self,
        "SimpleDynamoDbTable",
        table_name="SimpleTable",
        # removal_policy=cdk.RemovalPolicy.DESTROY,
        partition_key=aws_dynamodb.Attribute(
            name="pkid", type=aws_dynamodb.AttributeType.STRING),
        sort_key=aws_dynamodb.Attribute(
            name="sortkey", type=aws_dynamodb.AttributeType.NUMBER),
        time_to_live_attribute="ttl",
        billing_mode=aws_dynamodb.BillingMode.PROVISIONED,
        read_capacity=15,
        write_capacity=5,
    )
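
# A hedged follow-up sketch (not in the source): if queries by a non-key
# attribute were needed, the Table construct can add a global secondary index
# after creation. The index and attribute names here are assumptions.
ddb_table.add_global_secondary_index(
    index_name='status-index',
    partition_key=aws_dynamodb.Attribute(
        name='status', type=aws_dynamodb.AttributeType.STRING),
    read_capacity=5,
    write_capacity=5)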
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    self.vpc = ec2.Vpc(self, 'Network')
    VpcEndpointsForAWSServices(self, 'Endpoints', vpc=self.vpc)

    self.product_descr_bucket = s3.Bucket(
        self, 'AndroidProducts', removal_policy=core.RemovalPolicy.DESTROY)

    self.efs_sg = ec2.SecurityGroup(
        self,
        'EfsGroup',
        vpc=self.vpc,
        allow_all_outbound=True,
        description='Security Group for ApkStore EFS')
    self.efs_sg.add_ingress_rule(peer=ec2.Peer.any_ipv4(),
                                 connection=ec2.Port.all_traffic(),
                                 description='Allow any traffic')

    self.efs = efs.FileSystem(
        self,
        'ApkStore',
        vpc=self.vpc,
        security_group=self.efs_sg,
        lifecycle_policy=efs.LifecyclePolicy.AFTER_14_DAYS,
        performance_mode=efs.PerformanceMode.GENERAL_PURPOSE)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) prj_name = self.node.try_get_context("project_name") env_name = self.node.try_get_context("env") vpc = ec2.Vpc(self, 'devVPC')
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None: super().__init__(scope, construct_id, **kwargs) vpc = ec2.Vpc(self, "EcsVpc", max_azs=2, nat_gateways=0) vpc.add_s3_endpoint('S3Endpoint') vpc.add_interface_endpoint( 'EcrDockerEndpoint', service=ec2.InterfaceVpcEndpointAwsService.ECR_DOCKER) vpc.add_interface_endpoint( 'EcrEndpoint', service=ec2.InterfaceVpcEndpointAwsService.ECR) vpc.add_interface_endpoint( 'CloudWatchLogsEndpoint', service=ec2.InterfaceVpcEndpointAwsService.CLOUDWATCH_LOGS) cluster = ecs.Cluster(self, "EcsCluster", vpc=vpc) task_definition = ecs.FargateTaskDefinition(self, "DemoServiceTask", family="DemoServiceTask") image = ecs.ContainerImage.from_asset("service") container = task_definition.add_container("app", image=image) container.add_port_mappings(ecs.PortMapping(container_port=8080)) ecs_patterns.ApplicationLoadBalancedFargateService( self, "DemoService", cluster=cluster, desired_count=2, task_definition=task_definition)
def __init__(self, scope: core.Construct, id: str, cidr_vpc, cidr_mask,
             nat_gateways, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # The code that defines your stack goes here
    self.vpc = ec2.Vpc(
        self,
        "VPC",
        max_azs=2,
        cidr=cidr_vpc,
        # configuration will create 2 groups in 2 AZs = 4 subnets.
        subnet_configuration=[
            ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PUBLIC,
                                    name="Public",
                                    cidr_mask=cidr_mask),
            ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PRIVATE,
                                    name="Private",
                                    cidr_mask=cidr_mask)
        ],
        # nat_gateway_provider=ec2.NatProvider.gateway(),
        nat_gateways=nat_gateways,
    )

    # core.Tag on its own is never applied; attach the tag to the stack instead
    core.Tags.of(self).add("Owner", "Wahaj-vpc")

    core.CfnOutput(self, "Output", value=self.vpc.vpc_id)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # Create production VPC
    self.vpc = ec2.Vpc(
        self,
        "VPC-JM",
        cidr="10.0.0.0/16",
        max_azs=2,
        # Configuration below will create 3 subnet groups in 2 AZs = 6 subnets
        subnet_configuration=[
            ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PUBLIC,
                                    name="Bastion",
                                    cidr_mask=24),
            ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PRIVATE,
                                    name="App-Private",
                                    cidr_mask=24),
            ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.ISOLATED,
                                    name="DB-Private",
                                    cidr_mask=24)
        ],
        nat_gateway_provider=ec2.NatProvider.gateway(),
        nat_gateways=1,
    )

    core.CfnOutput(self, "Output", value=self.vpc.vpc_id)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # Create a 3 tier vpc
    self.vpc = _ec2.Vpc(
        self,
        "customVpcId",
        cidr="10.111.0.0/16",
        max_azs=2,
        nat_gateways=1,
        subnet_configuration=[
            _ec2.SubnetConfiguration(name="public",
                                     cidr_mask=24,
                                     subnet_type=_ec2.SubnetType.PUBLIC),
            _ec2.SubnetConfiguration(name="app",
                                     cidr_mask=24,
                                     subnet_type=_ec2.SubnetType.PRIVATE),
            _ec2.SubnetConfiguration(name="db",
                                     cidr_mask=24,
                                     subnet_type=_ec2.SubnetType.ISOLATED)
        ])

    core.CfnOutput(self,
                   "customVpcOutput",
                   value=self.vpc.vpc_id,
                   export_name="VpcId")
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    ZachECSName = self.__class__.__name__

    vpc = ec2.Vpc(self, ZachECSName + "Vpc", max_azs=2)

    asg = autoscaling.AutoScalingGroup(
        self,
        ZachECSName + "SCG",
        instance_type=ec2.InstanceType("t3a.nano"),
        machine_image=ecs.EcsOptimizedAmi(),
        associate_public_ip_address=True,
        update_type=autoscaling.UpdateType.REPLACING_UPDATE,
        desired_capacity=3,
        vpc=vpc,
        # dict keys in the Python CDK use snake_case
        vpc_subnets={'subnet_type': ec2.SubnetType.PUBLIC}
    )

    cluster = ecs.Cluster(self, ZachECSName + "Cluster", vpc=vpc)
    cluster.add_auto_scaling_group(asg)
    cluster.add_capacity(ZachECSName + "AutoScalingGroup",
                         instance_type=ec2.InstanceType("t3a.nano"))
def __init__(self, scope: Construct, id: str, **kwargs) -> None: super().__init__(scope, id, *kwargs) vpc = ec2.Vpc(self, "MyVpc", max_azs=2) cluster = ecs.Cluster(self, 'Ec2Cluster', vpc=vpc) asg = autoscaling.AutoScalingGroup( self, "DefaultAutoScalingGroup", instance_type=ec2.InstanceType("t2.micro"), machine_image=ecs.EcsOptimizedImage.amazon_linux2(), vpc=vpc, ) capacity_provider = ecs.AsgCapacityProvider(self, "AsgCapacityProvider", auto_scaling_group=asg) cluster.add_asg_capacity_provider(capacity_provider) ecs_service = ecs_patterns.NetworkLoadBalancedEc2Service( self, "Ec2Service", cluster=cluster, memory_limit_mib=512, task_image_options={ 'image': ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample") }) CfnOutput(self, "LoadBalancerDNS", value=ecs_service.load_balancer.load_balancer_dns_name)
def test_emr_launch_function():
    app = core.App()
    stack = core.Stack(app, 'test-stack')
    vpc = ec2.Vpc(stack, 'Vpc')
    success_topic = sns.Topic(stack, 'SuccessTopic')
    failure_topic = sns.Topic(stack, 'FailureTopic')

    profile = emr_profile.EMRProfile(
        stack, 'test-profile',
        profile_name='test-profile',
        vpc=vpc)
    configuration = cluster_configuration.ClusterConfiguration(
        stack, 'test-configuration', configuration_name='test-configuration')

    function = emr_launch_function.EMRLaunchFunction(
        stack, 'test-function',
        launch_function_name='test-function',
        emr_profile=profile,
        cluster_configuration=configuration,
        cluster_name='test-cluster',
        success_topic=success_topic,
        failure_topic=failure_topic,
        allowed_cluster_config_overrides=configuration.override_interfaces['default'],
        wait_for_cluster_start=False
    )

    resolved_function = stack.resolve(function.to_json())

    # default_function is the expected JSON snapshot, defined at module level
    print(default_function)
    print(resolved_function)
    assert default_function == resolved_function
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    prj_name = self.node.try_get_context('project_name')
    env_name = self.node.try_get_context('env')

    self.vpc = ec2.Vpc(
        self,
        'devVPC',
        cidr="10.10.0.0/16",
        max_azs=2,
        enable_dns_hostnames=True,
        enable_dns_support=True,
        subnet_configuration=[
            ec2.SubnetConfiguration(name='Public',
                                    subnet_type=ec2.SubnetType.PUBLIC,
                                    cidr_mask=24),
            ec2.SubnetConfiguration(name='data',
                                    subnet_type=ec2.SubnetType.ISOLATED,
                                    cidr_mask=24)
        ])

    public_subnets = [subnet.subnet_id for subnet in self.vpc.public_subnets]

    # Store each public subnet id in SSM Parameter Store for other stacks
    for count, ps in enumerate(public_subnets, start=1):
        ssm.StringParameter(
            self,
            f'public-subnet-{count}',
            string_value=ps,
            parameter_name=f'/{env_name}/public-subnet-{count}')
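
# Companion sketch for a consuming stack (hedged; the stack wiring is an
# assumption): the subnet ids written above can be read back from Parameter
# Store at synth time.
subnet_1 = ssm.StringParameter.value_for_string_parameter(
    self, f'/{env_name}/public-subnet-1')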
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) vpc = ec2.Vpc( self, "ZephStreamlitVPC", max_azs=2, # default is all AZs in region, ) cluster = ecs.Cluster(self, "ZephStreamlitCluster", vpc=vpc) # Build Dockerfile from local folder and push to ECR image = ecs.ContainerImage.from_asset('streamlit-docker') # Use an ecs_patterns recipe to do all the rest! ecs_patterns.ApplicationLoadBalancedFargateService( self, "ZephFargateService", cluster=cluster, # Required cpu=256, # Default is 256 desired_count=1, # Default is 1 task_image_options=ecs_patterns. ApplicationLoadBalancedTaskImageOptions( image=image, container_port=8501), # Docker exposes 8501 for streamlit memory_limit_mib=512, # Default is 512 public_load_balancer=True, # Default is False )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # import default VPC
    #vpc = aws_ec2.Vpc.from_lookup(self, 'VPC', is_default=True)
    vpc = aws_ec2.Vpc(self, 'EKS-CDK-VPC',
                      cidr='10.0.0.0/16',
                      nat_gateways=1)

    # create an admin role
    eks_admin_role = aws_iam.Role(self, 'AdminRole',
                                  assumed_by=aws_iam.AccountPrincipal(
                                      account_id=self.account))

    # create the cluster
    cluster = aws_eks.Cluster(self, 'cluster',
                              masters_role=eks_admin_role,
                              vpc=vpc,
                              default_capacity=0,
                              version='1.14',
                              output_cluster_name=True)

    cluster.add_capacity('ondemand',
                         instance_type=aws_ec2.InstanceType('t3.large'),
                         max_capacity=1,
                         bootstrap_options=aws_eks.BootstrapOptions(
                             kubelet_extra_args='--node-labels myCustomLabel=od'
                         ))
    cluster.add_capacity('spot',
                         instance_type=aws_ec2.InstanceType('t3.large'),
                         max_capacity=1,
                         spot_price='0.1094',
                         bootstrap_options=aws_eks.BootstrapOptions(
                             kubelet_extra_args='--node-labels myCustomLabel=spot'
                         ))
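
# Hedged extension (not in the source): besides masters_role, individual IAM
# users can be granted cluster access through the aws-auth ConfigMap; the
# user name here is illustrative.
admin_user = aws_iam.User(self, 'KubernetesAdmin', user_name='k8s-admin')
cluster.aws_auth.add_user_mapping(admin_user, groups=['system:masters'])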
def __init__(self, scope: Construct, id: str):
    super().__init__(scope, id)

    # The code that defines your stack goes here
    vpc = aws_ec2.Vpc(self, 'vpc')
    cluster = aws_ecs.Cluster(self, 'cluster', vpc=vpc)

    # task_definition = aws_ecs.FargateTaskDefinition(self, 'NodeTask')
    # log_driver = aws_ecs.LogDriver.aws_logs(stream_prefix="NodeAppContainerLog")
    # container = task_definition.add_container('NodeApp',
    #     image=aws_ecs.ContainerImage.from_asset("nodejsapp"), logging=log_driver)
    # port_mapping = aws_ecs.PortMapping(container_port=8080)
    # container.add_port_mappings(port_mapping)
    #
    # aws_ecs.FargateService(self, 'service',
    #     cluster=cluster,
    #     task_definition=task_definition,
    #     desired_count=5)

    aws_ecs_patterns.LoadBalancedFargateService(
        self,
        'NodeApp',
        cluster=cluster,
        desired_count=5,
        container_name="NodeApp",
        container_port=8080,
        image=aws_ecs.ContainerImage.from_asset("nodejsapp"))
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) # The code that defines your stack goes here vpc = ec2.Vpc( self, "MyVpc", max_azs=1 ) sg = ec2.SecurityGroup( self, "SG", description='Allow ssh access to ec2 instances', vpc=vpc ) sg.add_ingress_rule( peer=ec2.Peer.any_ipv4(), connection=ec2.Port.tcp(22) ) ec2instance = ec2.Instance( self, "EC2INSTANCE", vpc=vpc, instance_type=ec2.InstanceType.of( ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.MICRO ), machine_image=ec2.AmazonLinuxImage(), vpc_subnets={'subnet_type': ec2.SubnetType.PUBLIC}, security_group=sg, key_name="MyNVKeyPair" )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id, *kwargs) vpc = ec2.Vpc( self, "MyVpc", max_azs=2 ) cluster = ecs.Cluster( self, 'Ec2Cluster', vpc=vpc ) cluster.add_capacity("DefaultAutoScalingGroup", instance_type=ec2.InstanceType("t2.micro")) ecs_service = ecs_patterns.NetworkLoadBalancedEc2Service( self, "Ec2Service", cluster=cluster, memory_limit_mib=512, task_image_options={ 'image': ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample") } ) core.CfnOutput( self, "LoadBalancerDNS", value=ecs_service.load_balancer.load_balancer_dns_name )
def setup_vpc(self):
    """Setup VPC and network

    Create Vpc with 2 subnets on 2 availability zones:
        Public:  10.20.0.0/24 on AZ1 | 10.20.1.0/24 on AZ2
        Private: 10.20.2.0/24 on AZ1 | 10.20.3.0/24 on AZ2

    Returns
    -------
    aws_ec2.Vpc
    """
    return ec2.Vpc(self, 'Vpc',
                   cidr=self.config.vpc_cidr,
                   max_azs=2,
                   subnet_configuration=[
                       ec2.SubnetConfiguration(
                           subnet_type=ec2.SubnetType.PUBLIC,
                           name='Public',
                           cidr_mask=24,
                       ),
                       ec2.SubnetConfiguration(
                           subnet_type=ec2.SubnetType.PRIVATE,
                           name='Application',
                           cidr_mask=24,
                       )
                   ])
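
# Usage sketch (hedged): callers of setup_vpc() can pick a tier from the
# returned Vpc with select_subnets, e.g. the private 'Application' group.
vpc = self.setup_vpc()
private = vpc.select_subnets(subnet_type=ec2.SubnetType.PRIVATE)
# private.subnet_ids resolves at synth time to the two private subnet ids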
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None: super().__init__(scope, construct_id, **kwargs) self.vpc = ec2.Vpc( self, "VPC", nat_gateways=1, subnet_configuration=[ ec2.SubnetConfiguration(name="public", subnet_type=ec2.SubnetType.PUBLIC, cidr_mask=24), ec2.SubnetConfiguration(name="private", subnet_type=ec2.SubnetType.PRIVATE, cidr_mask=24), ec2.SubnetConfiguration(name="isolated", subnet_type=ec2.SubnetType.ISOLATED, cidr_mask=24) ], gateway_endpoints={ "S3": ec2.GatewayVpcEndpointOptions( service=ec2.GatewayVpcEndpointAwsService.S3) }, flow_logs={"FlowLogs": ec2.FlowLogOptions()})