def __init__(self, scope: cdk.Stack, construct_id: str, **kwargs) -> None:
    """Create a VPC and a Security Group that admits management traffic
    (SSH and RDP) from a single designated jump host.

    Args:
        scope: The parent stack.
        construct_id: Logical id of this construct.
        **kwargs: Forwarded to the base construct.
    """
    super().__init__(scope, construct_id, **kwargs)

    # Create the VPC resource.
    self._vpc = Vpc(self, "MyVPC", cidr="10.10.0.0/16")

    # Create a Security Group within the VPC that is used to allow
    # management traffic from designated jump hosts.
    self._sg = SecurityGroup(
        self,
        "MySG",
        vpc=self._vpc,
        allow_all_outbound=False,
        description="Management traffic from jump boxes",
        security_group_name="jumpbox-mgmt-traffic")

    # Add ingress rules for jump host 10.255.0.10 to TCP/22 (SSH) and
    # TCP/3389 (RDP).  The original duplicated the whole statement per
    # port; a loop keeps the two rules identical by construction.
    for port in (22, 3389):
        self._sg.add_ingress_rule(
            peer=Peer.ipv4("10.255.0.10/32"),
            connection=Port(
                protocol=Protocol.TCP,
                string_representation="host1",
                from_port=port,
                to_port=port))
class CdkNowIGetIt(cdk.Construct):
    """VPC with isolated subnets plus an optional jump-host Security Group."""

    def __init__(self, scope: cdk.Stack, construct_id: str, vpc_cidr: str,
                 jump_host: str, mgmt_ports: list, subnet_len: int,
                 **kwargs) -> None:
        """Create the VPC and, if a jump host was given, its management SG.

        Args:
            vpc_cidr (str): The CIDR range for the VPC.
            jump_host (str): An optional IP address for the jump host. If this
                is not specified, the Security Group will not be created.
            mgmt_ports (list): A list of TCP ports which the jump host is
                allowed to connect to.
            subnet_len (int): The prefix length for subnet CIDR addresses.
        """
        super().__init__(scope, construct_id, **kwargs)

        # Create the VPC resource.  The VPC does not have an internet
        # gateway or NAT gateways.  Subnets are created in 2 zones.
        subnets = [
            SubnetConfiguration(name="MyVPC-Private",
                                subnet_type=SubnetType.ISOLATED,
                                cidr_mask=subnet_len)
        ]
        # nat_gateways=None would mean "use the CDK default" (one per AZ
        # whenever public subnets exist); 0 disables NAT explicitly,
        # matching the comment above.
        self._vpc = Vpc(self,
                        "MyVPC",
                        cidr=vpc_cidr,
                        max_azs=2,
                        nat_gateways=0,
                        subnet_configuration=subnets)

        # Security Group only created if the jump host parameter was
        # specified.
        if jump_host is not None and len(jump_host) > 0:
            self.create_sg(jump_host, mgmt_ports)

    def create_sg(self, jump_host, mgmt_ports):
        """Create the management Security Group admitting *jump_host* on
        each TCP port in *mgmt_ports*."""
        self._sg = SecurityGroup(
            self,
            "MySG",
            vpc=self._vpc,
            allow_all_outbound=False,
            description="Management traffic from jump boxes",
            security_group_name="jumpbox-mgmt-traffic")

        # One ingress rule per management port, all from the jump host.
        for port in mgmt_ports:
            self._sg.add_ingress_rule(peer=Peer.ipv4(jump_host),
                                      connection=Port(
                                          protocol=Protocol.TCP,
                                          string_representation="jump",
                                          from_port=int(port),
                                          to_port=int(port)))
def _add_default_rules(self, sg: ec2.SecurityGroup, vpc: ec2.Vpc):
    """Attach the baseline ingress rules to *sg*.

    TODO: should create a high level construct to hide those rules.
    TODO: need to review those rules because we probably do not need
    all of them.

    Current default rules are:
      - All ICMP, TCP, and UDP from within the given VPC.
      - All TCP traffic from MetService network.
    """
    # Allow traffic coming from internal MetService network
    # TODO: we should probably restrict this rule to port 22 (SSH)
    sg.add_ingress_rule(
        peer=ec2.Peer.ipv4(self.config["metservice_cidr"]),
        connection=ec2.Port.all_tcp(),
        description="Allow connections from MetService network",
    )

    # Blanket intra-VPC rules, one per protocol.
    intra_vpc_rules = (
        (ec2.Port.all_tcp(), "Allow all TCP traffic from within the VPC"),
        (ec2.Port.all_udp(), "Allow all UDP traffic from within the VPC "),
        (ec2.Port.all_icmp(), "Allow all ICMP traffic from within the VPC "),
    )
    vpc_peer = ec2.Peer.ipv4(vpc.vpc_cidr_block)
    for connection, description in intra_vpc_rules:
        sg.add_ingress_rule(
            peer=vpc_peer,
            connection=connection,
            description=description,
        )
def _set_common_ingress_rules(
        primary: ec2.SecurityGroup,
        secondary: ec2.SecurityGroup) -> ec2.SecurityGroup:
    """Allow all TCP, UDP and ICMP traffic into *primary* from both groups.

    Args:
        primary: Security group that receives the ingress rules; it is
            also allowed to reach itself.
        secondary: Additional security group allowed to reach *primary*.

    Returns:
        The *primary* security group, for chaining.
    """
    # tcp_range(0, 65535) / udp_range(0, 65535) / icmp_type(-1) are the
    # long-hand spellings of the all_tcp / all_udp / all_icmp helpers;
    # the loop removes the per-peer duplication of the original.
    for peer in (primary, secondary):
        primary.add_ingress_rule(peer, ec2.Port.all_tcp())
        primary.add_ingress_rule(peer, ec2.Port.all_udp())
        primary.add_ingress_rule(peer, ec2.Port.all_icmp())
    return primary
def __init__(self, scope: core.Construct, id_: str, num_of_azs: int) -> None:
    """Build the audit VPC: private + public/NAT subnet groups, gateway
    endpoints for S3/DynamoDB, interface endpoints for SSM/SQS/EC2/Lambda,
    and the shared Lambda security group."""
    super().__init__(scope, id_)

    # Currently IOT, AppConfig & Cloudmap are not accessable via VPC
    # endpoint, so we use NAT GW to access them.
    subnet_groups = [
        SubnetConfiguration(name=PRIVATE_SUBNET_GROUP,
                            subnet_type=SubnetType.PRIVATE,
                            cidr_mask=24),
        SubnetConfiguration(name=PUBLIC_NAT_GWS_SUBNET_GROUP,
                            subnet_type=SubnetType.PUBLIC,
                            cidr_mask=24),
    ]

    # Gateway endpoints keep S3/DynamoDB traffic off the NAT gateways.
    private_selection = [SubnetSelection(subnet_group_name=PRIVATE_SUBNET_GROUP)]
    gateway_endpoints = {
        'S3':
            GatewayVpcEndpointOptions(
                service=GatewayVpcEndpointAwsService.S3,
                subnets=private_selection),
        'DynamoDb':
            GatewayVpcEndpointOptions(
                service=GatewayVpcEndpointAwsService.DYNAMODB,
                subnets=private_selection),
    }

    self.audit_vpc = Vpc(
        self,
        id_,
        max_azs=num_of_azs,
        subnet_configuration=subnet_groups,
        gateway_endpoints=gateway_endpoints,
        enable_dns_support=True,  # For the ElasticSearch Public Domain
        enable_dns_hostnames=True)

    # One interface endpoint per AWS service the workloads call.
    interface_endpoints = (
        ('SsmVpcEndpoint', InterfaceVpcEndpointAwsService.SSM),
        ('SqsVpcEndpoint', InterfaceVpcEndpointAwsService.SQS),
        ('Ec2VpcEndpoint', InterfaceVpcEndpointAwsService.EC2),
        ('LambdaVpcEndpoint', InterfaceVpcEndpointAwsService.LAMBDA_),
    )
    for endpoint_id, service in interface_endpoints:
        self.audit_vpc.add_interface_endpoint(
            endpoint_id,
            service=service,
            subnets=SubnetSelection(one_per_az=True))

    # Shared security group for the audit lambdas.
    self.lambdas_sg = SecurityGroup(self,
                                    id='LambdaSg',
                                    vpc=self.audit_vpc,
                                    security_group_name='Audit-Lambda')
def __init__(self, scope: core.Construct, id: str, vpc: Vpc, **kwargs):
    """Create the manager/master/worker security groups for the cluster.

    Args:
        scope: Parent construct.
        id: Logical id for this stack.
        vpc: VPC in which the security groups are created.
        **kwargs: Forwarded to the base stack.
    """
    # Zero-argument super(): the explicit two-argument form is a
    # Python 2 legacy and inconsistent with the rest of this code base.
    super().__init__(scope, id, **kwargs)
    self.__scope = scope

    # All three groups allow any outbound traffic; ingress between them
    # is wired up afterwards by __create_rules().
    self.manager_security_group = SecurityGroup(scope=self,
                                                id="manager-sg",
                                                vpc=vpc,
                                                allow_all_outbound=True)
    self.master_security_group = SecurityGroup(scope=self,
                                               id="master-sg",
                                               vpc=vpc,
                                               allow_all_outbound=True)
    self.worker_security_group = SecurityGroup(scope=self,
                                               id="worker-sg",
                                               vpc=vpc,
                                               allow_all_outbound=True)
    self.__create_rules()
def get_nfs_security_group(self, vpc):
    """Create and return the tagged security group for the OBM EFS mounts."""
    sg = SecurityGroup(
        self._stack,
        'obm_efs',
        vpc=vpc,
        allow_all_outbound=True,
    )
    self._tag_it(sg)
    return sg
def add_asg_fleet(self, scope: BaseApp, cluster: Cluster, fleet) -> List[AutoScalingGroup]:
    """Create one AutoScalingGroup per private subnet for *fleet*.

    Args:
        scope: Application scope used to prefix construct ids.
        cluster: EKS cluster the capacity is added to.
        fleet: Fleet spec dict; keys used: name, nodeLabels, instanceType,
            autoscaling.{minInstances,maxInstances}, spotPrice.

    Returns:
        The list of created AutoScalingGroups.
    """
    created_fleets: List[AutoScalingGroup] = []

    node_labels = fleet.get('nodeLabels', {})
    node_labels["fleetName"] = fleet.get('name')
    node_labels_as_str = ','.join(map('='.join, node_labels.items()))

    # Source of tweaks: https://kubedex.com/90-days-of-aws-eks-in-production/
    kubelet_extra_args = ' '.join([
        # Add node labels
        f'--node-labels {node_labels_as_str}' if len(node_labels_as_str) else '',
        # Capture resource reservation for kubernetes system daemons like the kubelet, container runtime,
        # node problem detector, etc.
        '--kube-reserved cpu=250m,memory=1Gi,ephemeral-storage=1Gi',
        # Capture resources for vital system functions, such as sshd, udev.
        '--system-reserved cpu=250m,memory=0.2Gi,ephemeral-storage=1Gi',
        # Start evicting pods from this node once these thresholds are crossed.
        '--eviction-hard memory.available<0.2Gi,nodefs.available<10%',
    ])

    # NOTE: the original code also imported the cluster security group
    # here (SecurityGroup.from_security_group_id) but never used it; the
    # dead lookup has been removed.

    # Tags the cluster-autoscaler uses to discover the ASGs it may scale.
    asg_tags = {
        "k8s.io/cluster-autoscaler/enabled": "true",
        f"k8s.io/cluster-autoscaler/{cluster.cluster_name}": "owned",
    }

    # For correctly autoscaling the cluster we need our autoscaling groups to not span across AZs
    # to avoid the AZ Rebalance, hence we create an ASG per subnet
    for counter, subnet in enumerate(cluster.vpc.private_subnets):
        asg: AutoScalingGroup = cluster.add_capacity(
            id=scope.prefixed_str(f'{fleet.get("name")}-{counter}'),
            instance_type=InstanceType(fleet.get('instanceType')),
            min_capacity=fleet.get('autoscaling', {}).get('minInstances'),
            max_capacity=fleet.get('autoscaling', {}).get('maxInstances'),
            bootstrap_options=BootstrapOptions(
                kubelet_extra_args=kubelet_extra_args,
            ),
            spot_price=str(fleet.get('spotPrice')) if fleet.get('spotPrice') else None,
            vpc_subnets=SubnetSelection(subnets=[subnet]),
        )
        created_fleets.append(asg)
        self._add_userdata_production_tweaks(asg)

        # Propagate the autoscaler discovery tags onto each ASG.
        for key, value in asg_tags.items():
            Tag.add(asg, key, value)

    return created_fleets
def create_sg(self, jump_host, mgmt_ports):
    """Create the jump-box management Security Group and its ingress rules."""
    # Management SG: no blanket egress, explicitly named so it is easy
    # to find in the console.
    self._sg = SecurityGroup(
        self,
        "MySG",
        vpc=self._vpc,
        allow_all_outbound=False,
        description="Management traffic from jump boxes",
        security_group_name="jumpbox-mgmt-traffic")

    # One rule per management port, all sourced from the jump host.
    jump_peer = Peer.ipv4(jump_host)
    for mgmt_port in mgmt_ports:
        tcp_port = int(mgmt_port)
        self._sg.add_ingress_rule(
            peer=jump_peer,
            connection=Port(protocol=Protocol.TCP,
                            string_representation="jump",
                            from_port=tcp_port,
                            to_port=tcp_port))
def get_file_system(scope: Construct) -> FileSystem:
    """Import the EFS file system exported by the volume stack."""
    config = get_volume_config()
    export_prefix = config.stack_name

    # Cross-stack references exported by the volume stack.
    sg_id = Fn.import_value(export_prefix + 'SecurityGroupId')
    fs_id = Fn.import_value(export_prefix + 'FileSystemId')

    security_group = SecurityGroup.from_security_group_id(
        scope, 'nfs_security_group', security_group_id=sg_id)
    return FileSystem.from_file_system_attributes(
        scope,
        'filesystem',
        file_system_id=fs_id,
        security_group=security_group)
class CdkNowIGetIt(cdk.Construct):
    """A VPC plus an optional Security Group admitting a jump host.

    Args:
        vpc_cidr (str): The CIDR range for the VPC.
        jump_host (str): An optional IP address for the jump host. If this
            is not specified, the Security Group will not be created.
        mgmt_ports (list): A list of TCP ports which the jump host is
            allowed to connect to.
    """

    def __init__(self, scope: cdk.Stack, construct_id: str, vpc_cidr: str,
                 jump_host: str, mgmt_ports: list, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # The VPC itself, using the caller-supplied CIDR range.
        self._vpc = Vpc(self, "MyVPC", cidr=vpc_cidr)

        # Only build the management Security Group when a jump host was
        # actually given.
        if jump_host is not None and len(jump_host) > 0:
            self.create_sg(jump_host, mgmt_ports)

    def create_sg(self, jump_host, mgmt_ports):
        """Create the management Security Group and its ingress rules."""
        self._sg = SecurityGroup(
            self,
            "MySG",
            vpc=self._vpc,
            allow_all_outbound=False,
            description="Management traffic from jump boxes",
            security_group_name="jumpbox-mgmt-traffic")

        # One rule per management port, all sourced from the jump host.
        jump_peer = Peer.ipv4(jump_host)
        for mgmt_port in mgmt_ports:
            tcp_port = int(mgmt_port)
            self._sg.add_ingress_rule(
                peer=jump_peer,
                connection=Port(protocol=Protocol.TCP,
                                string_representation="jump",
                                from_port=tcp_port,
                                to_port=tcp_port))
def __init__(self, scope: Construct, id: str, vpc: IVpc, **kwargs) -> None:
    """ECS cluster backed by an EC2 auto-scaling group, plus the security
    groups used to admit traffic from the NLB."""
    super().__init__(scope, id, **kwargs)

    Tags.of(self).add("Stack", "Common-Ecs")

    self._cluster = Cluster(
        self,
        "Cluster",
        vpc=vpc,
    )

    capacity_asg = AutoScalingGroup(
        self,
        "ClusterASG",
        vpc=vpc,
        instance_type=InstanceType("t3a.small"),
        machine_image=EcsOptimizedImage.amazon_linux2(),
        min_capacity=4,
    )
    self._cluster.add_auto_scaling_group(capacity_asg)

    # Create a SecurityGroup that the NLB can use to allow traffic from
    # NLB to us. This avoids a cyclic dependency.
    self.security_group = SecurityGroup(
        self,
        "SecurityGroup",
        vpc=vpc,
        allow_all_outbound=False,
    )

    # Only use "source_security_group" to check if flows come from ECS.
    # Do not use it to allow traffic in ECS; use "security_group" for
    # that.
    assert isinstance(capacity_asg.node.children[0], SecurityGroup)
    self.source_security_group = capacity_asg.node.children[0]

    # We could also make an additional security-group and add that to
    # the ASG, but it keeps adding up. This makes it a tiny bit easier
    # to get an overview what traffic is allowed from the console on AWS.
    capacity_asg.node.children[0].add_ingress_rule(
        peer=self.security_group,
        connection=Port.tcp_range(32768, 65535),
        description="NLB-self to target",
    )
    capacity_asg.node.children[0].add_ingress_rule(
        peer=self.security_group,
        connection=Port.udp_range(32768, 65535),
        description="NLB-self to target (UDP)",
    )
def get_web_security_group(self, vpc):
    """Create and return a tagged SG admitting SSH/HTTP/HTTPS from anywhere."""
    web_sg = SecurityGroup(
        self._stack,
        'obm_web',
        vpc=vpc,
        allow_all_outbound=True,
    )

    # Open each well-known port to both IPv4 and IPv6 clients.
    for port_number in [SSH_PORT, HTTP_PORT, HTTPS_PORT]:
        connection = Port(from_port=port_number,
                          to_port=port_number,
                          protocol=Protocol.TCP,
                          string_representation=f"Port {port_number}")
        for peer in (Peer.any_ipv4(), Peer.any_ipv6()):
            web_sg.add_ingress_rule(peer=peer, connection=connection)

    self._tag_it(web_sg)
    return web_sg
def __init__(self, scope: core.Construct, id: str, deploy_env: str,
             vpc: aws_ec2.Vpc, db_redis_stack: RdsElasticacheEfsStack,
             config: dict, **kwargs) -> None:
    """Create the Airflow ECS cluster (web, scheduler, worker services)
    and open the security-group paths to Postgres, Redis and EFS.

    Args:
        scope: Parent CDK construct.
        id: Logical id of this construct.
        deploy_env: Deployment environment name, used to derive resource
            names and construct ids.
        vpc: VPC the ECS cluster is placed in.
        db_redis_stack: Stack exposing the Postgres db, Redis, EFS and
            bastion this cluster talks to.
        config: Deployment configuration dict; keys read here are
            "dbadmin", "celery_broker_visibility_timeout" and
            "ecr_repo_name".
        **kwargs: Forwarded to the base construct.
    """
    super().__init__(scope, id, **kwargs)
    self.config = config
    self.deploy_env = deploy_env
    # Port the Postgres service listens on.
    self.db_port = DB_PORT
    # cannot map volumes to Fargate task defs yet - so this is done via Boto3 since CDK does not
    # support it yet: https://github.com/aws/containers-roadmap/issues/825
    #self.efs_file_system_id = db_redis_stack.efs_file_system_id
    cluster_name = get_cluster_name(deploy_env)
    self.cluster = ecs.Cluster(self, cluster_name,
                               cluster_name=cluster_name, vpc=vpc)
    # DB password comes from a SecureString SSM parameter and is exposed
    # to the containers as the POSTGRES_PASSWORD secret.
    pwd_secret = ecs.Secret.from_ssm_parameter(
        StringParameter.from_secure_string_parameter_attributes(
            self, f"dbpwd-{deploy_env}", version=1,
            parameter_name="postgres_pwd"))
    self.secrets = {"POSTGRES_PASSWORD": pwd_secret}
    # Environment shared by all Airflow containers (Celery executor).
    environment = {
        "EXECUTOR": "Celery",
        "POSTGRES_HOST": db_redis_stack.db_host,
        "POSTGRES_PORT": str(self.db_port),
        "POSTGRES_DB": "airflow",
        "POSTGRES_USER": self.config["dbadmin"],
        "REDIS_HOST": db_redis_stack.redis_host,
        "VISIBILITY_TIMEOUT":
            str(self.config["celery_broker_visibility_timeout"])
    }
    # The Airflow image is built from ./build and pushed to the
    # configured ECR repository.
    image_asset = DockerImageAsset(self, "AirflowImage",
                                   directory="build",
                                   repository_name=config["ecr_repo_name"])
    self.image = ecs.ContainerImage.from_docker_image_asset(image_asset)
    # web server - this initializes the db so must happen first
    self.web_service = self.airflow_web_service(environment)
    # https://github.com/aws/aws-cdk/issues/1654
    self.web_service_sg().connections.allow_to_default_port(
        db_redis_stack.postgres_db, 'allow PG')
    redis_port_info = Port(protocol=Protocol.TCP,
                           string_representation="allow to redis",
                           from_port=REDIS_PORT, to_port=REDIS_PORT)
    worker_port_info = Port(protocol=Protocol.TCP,
                            string_representation="allow to worker",
                            from_port=AIRFLOW_WORKER_PORT,
                            to_port=AIRFLOW_WORKER_PORT)
    redis_sg = SecurityGroup.from_security_group_id(
        self, id=f"Redis-SG-{deploy_env}",
        security_group_id=db_redis_stack.redis.vpc_security_group_ids[0])
    # NOTE(review): bastion_sg is assigned but never used below — either
    # wire it into an allow rule or remove it; confirm intent.
    bastion_sg = db_redis_stack.bastion.connections.security_groups[0]
    self.web_service_sg().connections.allow_to(redis_sg, redis_port_info,
                                               'allow Redis')
    self.web_service_sg().connections.allow_to_default_port(
        db_redis_stack.efs_file_system)
    # scheduler
    self.scheduler_service = self.create_scheduler_ecs_service(environment)
    # worker
    # NOTE(review): this rebinds the attribute name over the
    # worker_service *method*, so the factory cannot be called again on
    # this instance — consider renaming one of the two.
    self.worker_service = self.worker_service(environment)
    self.scheduler_sg().connections.allow_to_default_port(
        db_redis_stack.postgres_db, 'allow PG')
    self.scheduler_sg().connections.allow_to(redis_sg, redis_port_info,
                                             'allow Redis')
    self.scheduler_sg().connections.allow_to_default_port(
        db_redis_stack.efs_file_system)
    self.worker_sg().connections.allow_to_default_port(
        db_redis_stack.postgres_db, 'allow PG')
    self.worker_sg().connections.allow_to(redis_sg, redis_port_info,
                                          'allow Redis')
    self.worker_sg().connections.allow_to_default_port(
        db_redis_stack.efs_file_system)
    # When you start an airflow worker, airflow starts a tiny web server
    # subprocess to serve the workers local log files to the airflow main
    # web server, who then builds pages and sends them to users. This defines
    # the port on which the logs are served. It needs to be unused, and open
    # visible from the main web server to connect into the workers.
    self.web_service_sg().connections.allow_to(self.worker_sg(),
                                               worker_port_info,
                                               'web service to worker')
def __init__(self, scope: core.Construct, id: str, stack_name: str,
             task_definition_cpu: int, task_definition_memory_limit_mib: int,
             docker_image_name: str, container_port: int,
             desired_container_count: int,
             private_subnets: Sequence[aws_ec2.Subnet] = None,
             public_subnets: Sequence[aws_ec2.Subnet] = None,
             private_security_group: aws_ec2.SecurityGroup = None,
             public_security_group: aws_ec2.SecurityGroup = None,
             vpc: aws_ec2.Vpc = None,
             fargate_cluster: aws_ecs.Cluster = None,
             authorizer_lambda_arn: str = None,
             authorizer_lambda_role_arn: str = None,
             **kwargs):
    """Fargate service behind an internal NLB, exposed via API Gateway.

    Request path: API Gateway (optionally guarded by a Lambda token
    authorizer) -> VPC Link -> internal NLB on port 80 -> ECS/Fargate
    containers on ``container_port``.

    Args:
        scope: Parent construct.
        id: Logical id of this construct (also keys the inline policy).
        stack_name: Name used for the REST API and the VPC Link.
        task_definition_cpu: Fargate task CPU units.
        task_definition_memory_limit_mib: Fargate task memory (MiB).
        docker_image_name: Registry image the container runs.
        container_port: Port the container listens on.
        desired_container_count: Number of task replicas.
        private_subnets: Subnets for the ECS tasks (defaults to the
            VPC's private subnets).
        public_subnets: Subnets for the NLB (defaults to the VPC's
            public subnets).
        private_security_group: SG for the ECS service (created if None).
        public_security_group: SG admitting HTTP/HTTPS (created if None).
        vpc: Target VPC (a new one is created if None).
        fargate_cluster: Existing ECS cluster to reuse (created if None).
        authorizer_lambda_arn: Optional Lambda authorizer ARN.
        authorizer_lambda_role_arn: Role API Gateway assumes to invoke
            the authorizer.
        **kwargs: Forwarded to the base construct.
    """
    super().__init__(scope, id, **kwargs)

    # Role
    # Shared task/execution role; the inline policy is keyed by the
    # construct id.
    self.role = aws_iam.Role(
        self,
        'Role',
        assumed_by=aws_iam.ServicePrincipal(service='ecs.amazonaws.com'),
        managed_policies=[
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name=
                'service-role/AmazonECSTaskExecutionRolePolicy')
        ],
        inline_policies={
            id: aws_iam.PolicyDocument(statements=[
                aws_iam.PolicyStatement(
                    effect=aws_iam.Effect.ALLOW,
                    actions=[
                        'kms:Encrypt', 'kms:Decrypt', 'kms:ReEncrypt*',
                        'kms:GenerateDataKey*', 'kms:DescribeKey',
                        'ec2:CreateNetworkInterface',
                        'ec2:DescribeNetworkInterfaces',
                        'ec2:DeleteNetworkInterface',
                        # Remaining actions from https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/quickref-ecs.html
                        'elasticloadbalancing:DeregisterInstancesFromLoadBalancer',
                        'elasticloadbalancing:DeregisterTargets',
                        'elasticloadbalancing:Describe*',
                        'elasticloadbalancing:RegisterInstancesWithLoadBalancer',
                        'elasticloadbalancing:RegisterTargets',
                        'ec2:Describe*',
                        'ec2:AuthorizeSecurityGroupIngress'
                    ],
                    resources=['*'])
            ])
        })
    # ECS tasks must also be able to assume this role.
    self.role.assume_role_policy.add_statements(
        aws_iam.PolicyStatement(
            actions=['sts:AssumeRole'],
            principals=[
                aws_iam.ServicePrincipal(service='ecs-tasks.amazonaws.com')
            ]))

    # Set Defaults if parameters are None
    if vpc is None:
        vpc = aws_ec2.Vpc(self, 'Vpc')
    if private_subnets is None:
        private_subnets = vpc.private_subnets
    if public_subnets is None:
        public_subnets = vpc.public_subnets
    # NOTE(review): statement grouping below was reconstructed from a
    # flattened source — the default-SG ingress rules are assumed to sit
    # inside the corresponding `is None` branches; confirm against the
    # original layout.
    if public_security_group is None:
        public_security_group = aws_ec2.SecurityGroup(
            self, 'PublicSecurityGroup', vpc=vpc, allow_all_outbound=True)
        # Allow inbound HTTP traffic
        public_security_group.add_ingress_rule(
            peer=aws_ec2.Peer.ipv4(cidr_ip='0.0.0.0/0'),
            connection=aws_ec2.Port.tcp(port=80))
        # Allow inbound HTTPS traffic
        public_security_group.add_ingress_rule(
            peer=aws_ec2.Peer.ipv4(cidr_ip='0.0.0.0/0'),
            connection=aws_ec2.Port.tcp(port=443))
    if private_security_group is None:
        private_security_group = aws_ec2.SecurityGroup(
            self, 'PrivateSecurityGroup', vpc=vpc, allow_all_outbound=True)
        public_subnet_cidr_blocks = Utils.get_subnet_cidr_blocks(
            public_subnets)
        # Create an ingress rule for each of the NLB's subnet's CIDR ranges and add the rules to the ECS service's
        # security group. This will allow requests from the NLB to go into the ECS service. This allow inbound
        # traffic from public subnets.
        for cidr_block in public_subnet_cidr_blocks:
            private_security_group.add_ingress_rule(
                peer=aws_ec2.Peer.ipv4(cidr_ip=cidr_block),
                connection=aws_ec2.Port.tcp(port=container_port))
    if fargate_cluster is None:
        fargate_cluster = aws_ecs.Cluster(
            self,
            'FargateCluster',
        )

    # Task definition plus its single container and port mapping.
    task_def = aws_ecs.FargateTaskDefinition(
        self,
        'TaskDefinition',
        cpu=task_definition_cpu,
        memory_limit_mib=task_definition_memory_limit_mib,
        task_role=self.role,
        execution_role=self.role)
    container = aws_ecs.ContainerDefinition(
        self,
        'Container',
        image=aws_ecs.ContainerImage.from_registry(name=docker_image_name),
        task_definition=task_def,
        logging=aws_ecs.AwsLogDriver(stream_prefix='/ecs'))
    container.add_port_mappings(
        aws_ecs.PortMapping(container_port=container_port,
                            protocol=aws_ec2.Protocol.TCP))

    # The Fargate service runs in the private subnets.
    ecs_service = aws_ecs.FargateService(
        self,
        'FargateService',
        cluster=fargate_cluster,
        task_definition=task_def,
        vpc_subnets=aws_ec2.SubnetSelection(subnets=private_subnets),
        security_group=private_security_group,
        desired_count=desired_container_count)

    # Internal NLB: listener on 80 forwarding to the ECS tasks.
    target_group = aws_elasticloadbalancingv2.NetworkTargetGroup(
        self,
        'TargetGroup',
        port=80,  # Health check occurs over HTTP
        health_check=aws_elasticloadbalancingv2.HealthCheck(
            protocol=aws_elasticloadbalancingv2.Protocol.TCP),
        targets=[ecs_service],
        vpc=vpc)
    nlb = aws_elasticloadbalancingv2.NetworkLoadBalancer(
        self,
        'NetworkLoadBalancer',
        vpc=vpc,
        internet_facing=False,
        vpc_subnets=aws_ec2.SubnetSelection(subnets=public_subnets),
    )
    nlb.add_listener(
        id='Listener',
        port=80,  # HTTP listener
        default_target_groups=[target_group])

    # nlb.log_access_logs(  # todo: add this later when you have time to research the correct bucket policy.
    #     bucket=aws_s3.Bucket(
    #         self, 'LoadBalancerLogBucket',
    #         bucket_name='load-balancer-logs',
    #         public_read_access=False,
    #         block_public_access=aws_s3.BlockPublicAccess(
    #             block_public_policy=True,
    #             restrict_public_buckets=True
    #         )
    #     )
    # )

    # Dependencies
    ecs_service.node.add_dependency(nlb)

    # API Gateway
    rest_api = aws_apigateway.RestApi(self, stack_name)
    # Greedy proxy resource that forwards every sub-path.
    resource = rest_api.root.add_resource(
        path_part='{proxy+}',
        default_method_options=aws_apigateway.MethodOptions(
            request_parameters={'method.request.path.proxy': True}))
    token_authorizer = None
    if authorizer_lambda_arn and authorizer_lambda_role_arn:
        token_authorizer = aws_apigateway.TokenAuthorizer(  #todo: make this a parameter?
            self,
            'JwtTokenAuthorizer',
            results_cache_ttl=core.Duration.minutes(5),
            identity_source='method.request.header.Authorization',
            assume_role=aws_iam.Role.from_role_arn(
                self,
                'AuthorizerLambdaInvokationRole',
                role_arn=authorizer_lambda_role_arn),
            handler=aws_lambda.Function.from_function_arn(
                self, 'AuthorizerLambda',
                function_arn=authorizer_lambda_arn))
    # ANY method proxied through the VPC Link to the internal NLB.
    resource.add_method(
        http_method='ANY',
        authorization_type=aws_apigateway.AuthorizationType.CUSTOM,
        authorizer=token_authorizer,
        integration=aws_apigateway.HttpIntegration(
            url=f'http://{nlb.load_balancer_dns_name}/{{proxy}}',
            http_method='ANY',
            proxy=True,
            options=aws_apigateway.IntegrationOptions(
                request_parameters={
                    'integration.request.path.proxy':
                    'method.request.path.proxy'
                },
                connection_type=aws_apigateway.ConnectionType.VPC_LINK,
                vpc_link=aws_apigateway.VpcLink(
                    self,
                    'VpcLink',
                    description=
                    f'API Gateway VPC Link to internal NLB for {stack_name}',
                    vpc_link_name=stack_name,
                    targets=[nlb]))))
def __init__(self, scope: cdk.Construct, construct_id: str, config,
             vpc: IVpc, **kwargs) -> None:
    """Single-node (community) Neo4j server for Altimeter.

    Creates the neo4j user secret, an instance role that can read it,
    a security group opening the Bolt port, and the EC2 instance itself
    (bootstrapped via a userdata script read from ./resources).
    """
    super().__init__(scope, construct_id, **kwargs)

    # Secret holding the neo4j user credentials; the instance fetches it
    # at boot via the userdata script below.
    self.neo4j_user_secret = Secret(self, 'secretsmanager-secret-neo4j-user',
        secret_name=NEO4J_USER_SECRET_NAME
    )

    neo4j_server_instance_role = Role(self, 'iam-role-neo4j-server-instance',
        assumed_by=ServicePrincipal('ec2.amazonaws.com'),
        managed_policies=[
            # Use SSM Session Manager rather than straight ssh
            ManagedPolicy.from_aws_managed_policy_name('AmazonSSMManagedInstanceCore'),
            ManagedPolicy.from_aws_managed_policy_name('CloudWatchAgentServerPolicy')
        ],
        inline_policies={
            # Describe/tagging permissions used by agents on the box.
            "work-with-tags": PolicyDocument(
                statements=[
                    PolicyStatement(
                        actions=[
                            'ec2:CreateTags',
                            'ec2:Describe*',
                            'elasticloadbalancing:Describe*',
                            'cloudwatch:ListMetrics',
                            'cloudwatch:GetMetricStatistics',
                            'cloudwatch:Describe*',
                            'autoscaling:Describe*',
                        ],
                        resources=["*"]
                    )
                ]
            ),
            # Allow the instance to fetch only the neo4j user secret.
            "access-neo4j-user-secret": PolicyDocument(
                statements=[
                    PolicyStatement(
                        actions=['secretsmanager:GetSecretValue'],
                        resources=[self.neo4j_user_secret.secret_arn]
                    )
                ]
            )
        }
    )

    instance_security_group = SecurityGroup(self, "ec2-sg-neo4j-server-instance",
        description="Altimeter Neo4j Server Instance",
        vpc=vpc
    )
    # NOTE(review): Bolt port open to the whole internet — marked
    # TESTING; restrict before any production use.
    instance_security_group.add_ingress_rule(Peer.ipv4("0.0.0.0/0"), Port.tcp(7687), 'Bolt from ANYWHERE')  # TESTING
    # instance_security_group.add_ingress_rule(Peer.ipv4("0.0.0.0/0"), Port.tcp(7473), 'Bolt from ANYWHERE') # TESTING

    # Prepare userdata script, substituting in the secret's name.
    with open("./resources/neo4j-server-instance-userdata.sh", "r") as f:
        userdata_content = f.read()
    userdata_content = userdata_content.replace("[[NEO4J_USER_SECRET_NAME]]", NEO4J_USER_SECRET_NAME)
    user_data = UserData.for_linux()
    user_data.add_commands(userdata_content)

    instance_type = InstanceType.of(InstanceClass.BURSTABLE2, InstanceSize.MEDIUM)

    # The instance uses a region-pinned AMI (eu-west-1 only).
    self.instance = Instance(self, 'ec2-instance-neo4j-server-instance',
        instance_name="instance-altimeter--neo4j-community-standalone-server",
        machine_image=MachineImage.generic_linux(
            ami_map={
                "eu-west-1": "ami-00c8631d384ad7c53"
            }
        ),
        instance_type=instance_type,
        role=neo4j_server_instance_role,
        vpc=vpc,
        # vpc_subnets=SubnetSelection(subnets=vpc.select_subnets(subnet_group_name='Private').subnets),
        vpc_subnets=SubnetSelection(subnets=vpc.select_subnets(subnet_group_name='Public').subnets),
        security_group=instance_security_group,
        user_data=user_data,
        block_devices=[
            # Root volume.
            BlockDevice(
                device_name="/dev/sda1",
                volume=BlockDeviceVolume.ebs(
                    volume_size=10,
                    volume_type=EbsDeviceVolumeType.GP2,
                    encrypted=True,  # Just encrypt
                    delete_on_termination=True
                )
            ),
            # Data volume for neo4j.
            BlockDevice(
                device_name="/dev/sdb",
                volume=BlockDeviceVolume.ebs(
                    volume_size=20,  # TODO: Check size requirements
                    volume_type=EbsDeviceVolumeType.GP2,
                    encrypted=True,
                    delete_on_termination=True  # ASSUMPTION: NO 'primary' data, only altimeter results.
                )
            )
        ]
    )
    # Mode tags consumed by the Altimeter tooling.
    cdk.Tags.of(self.instance).add("neo4j_mode", "SINGLE")
    cdk.Tags.of(self.instance).add("dbms_mode", "SINGLE")
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Provision the attribute-tag (ABAC) IAM test fixtures plus two EC2
    instances — one untagged (expected blocked) and one fully tagged
    (expected allowed)."""
    super().__init__(scope, id, **kwargs)

    # Group whose members are governed by attribute (tag) based policies.
    attribute_tagged_group = Group(self, "Flexible Tagged")

    # The three access tags every "valid" resource must carry.
    access_project = core.CfnTag(key="access-project", value="elysian")
    access_team = core.CfnTag(key="access-team", value="webdev")
    access_cost_center = core.CfnTag(key="cost-center", value="2600")

    # Permission boundary imported from a JSON policy document.
    flexible_boundary_policy = CfnManagedPolicy(
        self,
        "FlexiblePermissionBoundary",
        policy_document=json.loads(flexible_policy_permission_boundary),
    )

    # Developer user tagged with the access tags and constrained by the
    # permission boundary above.
    CfnUser(
        self,
        "Developer",
        tags=[access_project, access_team, access_cost_center],
        groups=[attribute_tagged_group.group_name],
        permissions_boundary=flexible_boundary_policy.ref,
    )

    # Add AWS managed policy for EC2 Read Only access for the console.
    attribute_tagged_group.add_managed_policy(
        ManagedPolicy.from_aws_managed_policy_name(
            managed_policy_name="AmazonEC2ReadOnlyAccess"
        )
    )

    # Import a json policy and create CloudFormation Managed Policy
    CfnManagedPolicy(
        self,
        "FlexibleAttributePolicy",
        policy_document=json.loads(full_attribute_based_policy),
        groups=[attribute_tagged_group.group_name],
    )

    vpc = Vpc.from_lookup(self, "AttributeTaggedVPC", is_default=True)
    instance_type = InstanceType("t2.micro")
    ami = MachineImage.latest_amazon_linux()

    # Untagged instance: requests against it should be denied by the
    # attribute-based policy.
    blocked_instance = Instance(
        self,
        "Blocked Instance",
        machine_image=ami,
        instance_type=instance_type,
        vpc=vpc,
    )
    # Re-use the AMI resolved for the instance above.
    image_id = blocked_instance.instance.image_id

    # Can only add tags to CfnInstance as of cdk v1.31
    valid_instance = CfnInstance(
        self,
        "Valid Instance",
        image_id=image_id,
        instance_type="t2.micro",
        tags=[access_project, access_team, access_cost_center],
    )

    # Empty group as it's not needed to complete our tests.
    test_security_group = SecurityGroup(self, "EmptySecurityGroup", vpc=vpc)

    # Export the fixture identifiers for the test harness.
    core.CfnOutput(
        self,
        "BlockedInstance",
        value=blocked_instance.instance_id,
        export_name="elysian-blocked-instance",
    )
    core.CfnOutput(
        self,
        "ValidInstance",
        value=valid_instance.ref,
        export_name="elysian-valid-instance",
    )
    core.CfnOutput(
        self,
        "TestSecurityGroup",
        value=test_security_group.security_group_id,
        export_name="test-elysian-sg",
    )
    core.CfnOutput(
        self,
        "DefaultAMI",
        value=image_id,
        export_name="default-elysian-ami"
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Provision the username-tag IAM test fixtures plus two EC2
    instances — one untagged (expected blocked) and one tagged with the
    developer's username (expected allowed)."""
    super().__init__(scope, id, **kwargs)

    # IAM group + user whose access is keyed on the username tag.
    username_tagged = Group(self, "Username Tagged")
    developer = User(self, "Developer")
    developer.add_to_group(username_tagged)

    # Add AWS managed policy for EC2 Read Only access for the console.
    username_tagged.add_managed_policy(
        ManagedPolicy.from_aws_managed_policy_name(
            managed_policy_name="AmazonEC2ReadOnlyAccess"))

    # Import a json policy and create CloudFormation Managed Policy
    CfnManagedPolicy(
        self,
        "UserTaggedPolicy",
        policy_document=json.loads(username_based_policy),
        groups=[username_tagged.group_name],
    )

    default_vpc = Vpc.from_lookup(self, "UsernameTaggedVPC", is_default=True)
    micro = InstanceType("t2.micro")
    amazon_linux = MachineImage.latest_amazon_linux()

    # Untagged instance: requests against it should be blocked by the
    # username-based policy.
    blocked = Instance(
        self,
        "Blocked Instance",
        machine_image=amazon_linux,
        instance_type=micro,
        vpc=default_vpc,
    )
    # Re-use the resolved AMI id for the second instance.
    image_id = blocked.instance.image_id

    # Can only add tags to CfnInstance as of 1.31, hence the L1 resource.
    username_tag = core.CfnTag(key="username", value=developer.user_name)
    valid = CfnInstance(
        self,
        "Valid Instance",
        image_id=image_id,
        instance_type="t2.micro",
        tags=[username_tag],
    )

    # Empty group as it's not needed to complete our tests.
    empty_sg = SecurityGroup(self, "EmptySecurityGroup", vpc=default_vpc)

    # Export the fixture identifiers for the test harness.
    core.CfnOutput(
        self,
        "BlockedInstance",
        value=blocked.instance_id,
        export_name="username-blocked-instance",
    )
    core.CfnOutput(
        self,
        "ValidInstance",
        value=valid.ref,
        export_name="username-valid-instance",
    )
    core.CfnOutput(
        self,
        "TestSecurityGroup",
        value=empty_sg.security_group_id,
        export_name="test-username-sg",
    )
    core.CfnOutput(
        self,
        "DefaultAMI",
        value=image_id,
        export_name="default-username-ami"
    )
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    """Stand up a test VPC, a custom-resource Lambda role, and a
    VPC-bound Elasticsearch domain with app/slow logging enabled."""
    super().__init__(scope, construct_id, **kwargs)

    # Execution role for the AwsCustomResource Lambda.
    role = iam.Role(
        scope=self,
        id='AwsCustomResourceRole',
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'))
    # NOTE(review): iam:PassRole on resource '*' is very broad —
    # consider scoping it to the specific role(s) to be passed.
    role.add_to_policy(
        iam.PolicyStatement(actions=['iam:PassRole'], resources=['*']))

    # Custom resource that lists S3 buckets on stack creation; the local
    # variable is unused afterwards — the construct registers itself in
    # the tree on construction.
    my_custom_resource = cr.AwsCustomResource(
        scope=self,
        id='MyAwsCustomResource',
        role=role,
        policy=cr.AwsCustomResourcePolicy.from_sdk_calls(resources=['*']),
        on_create=cr.AwsSdkCall(
            action='listBuckets',
            service='s3',
            physical_resource_id=cr.PhysicalResourceId.of('BucketsList'),
        ))

    # Two-AZ test VPC (wrapper construct exposing audit_vpc/lambdas_sg).
    vpc = VPCConstruct(self, id_='test-vpc', num_of_azs=2)
    # NOTE(review): 'vpc' here is the wrapper construct, not an IVpc —
    # the es.Domain below uses vpc.audit_vpc, so this likely should be
    # vpc=vpc.audit_vpc as well; confirm.
    security_group = SecurityGroup(
        self,
        id='test-security-group',
        vpc=vpc,
        security_group_name='test-security-group')
    # Allow the audit lambdas to reach the domain over HTTPS.
    security_group.add_ingress_rule(connection=Port.tcp(443),
                                    peer=vpc.lambdas_sg)

    # NOTE(review): TLS_1_0 admits outdated clients even though HTTPS is
    # enforced; TLS_1_2 is the usual choice — confirm this is deliberate.
    domain = es.Domain(
        scope=self,
        id='Domain',
        version=es.ElasticsearchVersion.V7_9,
        domain_name="es-domain-name",
        enable_version_upgrade=False,
        enforce_https=True,
        fine_grained_access_control=None,
        node_to_node_encryption=True,
        tls_security_policy=es.TLSSecurityPolicy.TLS_1_0,
        logging=es.LoggingOptions(
            app_log_enabled=True,
            slow_index_log_enabled=True,
            slow_search_log_enabled=True,
            app_log_group=LogGroup(
                scope=self,
                id="app-log-group",
                log_group_name=f'/aws/aes/domains/esdomain/app-log-group',
                removal_policy=core.RemovalPolicy.DESTROY),
            slow_index_log_group=LogGroup(
                scope=self,
                id="slow-index-log-group",
                log_group_name=
                f'/aws/aes/domains/esdomain/slow-index-log-group',
                removal_policy=core.RemovalPolicy.DESTROY),
            slow_search_log_group=LogGroup(
                scope=self,
                id="slow-search-log-group",
                log_group_name=
                f'/aws/aes/domains/esdomain/slow-search-log-group',
                removal_policy=core.RemovalPolicy.DESTROY)),
        removal_policy=core.RemovalPolicy.DESTROY,
        # Spread data nodes across both AZs.
        zone_awareness=es.ZoneAwarenessConfig(availability_zone_count=2,
                                              enabled=True),
        vpc_options=es.VpcOptions(
            security_groups=[security_group],
            subnets=vpc.audit_vpc.select_subnets(
                subnet_group_name=PRIVATE_SUBNET_GROUP).subnets))
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    """Build a private static website served from S3 through a PRIVATE
    API Gateway, reachable only via a VPC interface endpoint.

    Resources created, in dependency order: two VPCs (website + client),
    an SSM-only bastion in the client VPC, VPC peering between the two,
    an API Gateway interface endpoint, a resource policy restricting
    invocation to that endpoint, an S3 content bucket with deployed
    assets, and a private REST API proxying GETs to the bucket.

    Args:
        scope: Parent construct.
        construct_id: Logical ID for this construct.
        **kwargs: Forwarded to the base construct.
    """
    super().__init__(scope, construct_id, **kwargs)
    # Create two VPCs - one to host our private website, the other to act as a client
    website_vpc = Vpc(
        self,
        "WEBSITEVPC",
        cidr="10.0.0.0/16",
    )
    client_vpc = Vpc(
        self,
        "ClientVPC",
        cidr="10.1.0.0/16",
    )
    # Create a bastion host in the client API which will act like our client
    # workstation. No inbound rules are opened; access is via AWS SSM only.
    bastion = BastionHostLinux(
        self,
        "WEBClient",
        vpc=client_vpc,
        instance_name='my-bastion',
        instance_type=InstanceType('t3.micro'),
        machine_image=AmazonLinuxImage(),
        subnet_selection=SubnetSelection(subnet_type=SubnetType.PRIVATE),
        security_group=SecurityGroup(
            scope=self,
            id='bastion-sg',
            security_group_name='bastion-sg',
            description=
            'Security group for the bastion, no inbound open because we should access'
            ' to the bastion via AWS SSM',
            vpc=client_vpc,
            allow_all_outbound=True))
    # Set up a VPC peering connection between client and API VPCs, and adjust
    # the routing table to allow connections back and forth
    VpcPeeringHelper(self, 'Peering', website_vpc, client_vpc)
    # Create VPC endpoints for API gateway
    vpc_endpoint = InterfaceVpcEndpoint(
        self,
        'APIGWVpcEndpoint',
        vpc=website_vpc,
        service=InterfaceVpcEndpointAwsService.APIGATEWAY,
        private_dns_enabled=True,
    )
    # Let the bastion reach the endpoint over HTTPS.
    vpc_endpoint.connections.allow_from(bastion, Port.tcp(443))
    endpoint_id = vpc_endpoint.vpc_endpoint_id
    # Resource policy: deny any invocation that does not arrive through our
    # VPC endpoint, then allow the rest — the standard private-API pattern.
    api_policy = iam.PolicyDocument(statements=[
        iam.PolicyStatement(principals=[iam.AnyPrincipal()],
                            actions=['execute-api:Invoke'],
                            resources=['execute-api:/*'],
                            effect=iam.Effect.DENY,
                            conditions={
                                "StringNotEquals": {
                                    "aws:SourceVpce": endpoint_id
                                }
                            }),
        iam.PolicyStatement(principals=[iam.AnyPrincipal()],
                            actions=['execute-api:Invoke'],
                            resources=['execute-api:/*'],
                            effect=iam.Effect.ALLOW)
    ])
    # Create an s3 bucket to hold the content
    content_bucket = s3.Bucket(self,
                               "ContentBucket",
                               removal_policy=core.RemovalPolicy.DESTROY)
    # Upload our static content to the bucket
    s3dep.BucketDeployment(self,
                           "DeployWithInvalidation",
                           sources=[s3dep.Source.asset('website')],
                           destination_bucket=content_bucket)
    # Create a private API GW in the API VPC
    api = apigw.RestApi(self,
                        'PrivateS3Api',
                        endpoint_configuration=apigw.EndpointConfiguration(
                            types=[apigw.EndpointType.PRIVATE],
                            vpc_endpoints=[vpc_endpoint]),
                        policy=api_policy)
    # Create a role to allow API GW to access our S3 bucket contents
    role = iam.Role(
        self,
        "Role",
        assumed_by=iam.ServicePrincipal("apigateway.amazonaws.com"))
    role.add_to_policy(
        iam.PolicyStatement(effect=iam.Effect.ALLOW,
                            resources=[
                                content_bucket.bucket_arn,
                                content_bucket.bucket_arn + '/*'
                            ],
                            actions=["s3:Get*"]))
    # Create a proxy resource that captures all non-root resource requests
    resource = api.root.add_resource("{proxy+}")
    # Create an integration with S3
    resource_integration = apigw.Integration(
        type=apigw.IntegrationType.AWS,
        integration_http_method='GET',
        options=apigw.IntegrationOptions(
            request_parameters=
            {  # map the proxy parameter so we can pass the request path
                "integration.request.path.proxy": "method.request.path.proxy"
            },
            integration_responses=[
                apigw.IntegrationResponse(
                    status_code='200',
                    response_parameters=
                    {  # map the content type of the S3 object back to the HTTP response
                        "method.response.header.Content-Type":
                        "integration.response.header.Content-Type"
                    })
            ],
            credentials_role=role),
        # reference the bucket content we want to retrieve
        # NOTE(review): region 'eu-west-1' is hard-coded here — confirm this
        # matches the deployment region, or derive it from the stack.
        uri='arn:aws:apigateway:eu-west-1:s3:path/%s/{proxy}' %
        (content_bucket.bucket_name))
    # handle the GET request and map it to our new integration
    resource.add_method(
        "GET",
        resource_integration,
        method_responses=[
            apigw.MethodResponse(status_code='200',
                                 response_parameters={
                                     "method.response.header.Content-Type":
                                     False
                                 })
        ],
        request_parameters={"method.request.path.proxy": True})
    # Handle requests to the root of our site
    # Create another integration with S3 - this time with no proxy parameter,
    # mapping the root path straight to index.html.
    resource_integration = apigw.Integration(
        type=apigw.IntegrationType.AWS,
        integration_http_method='GET',
        options=apigw.IntegrationOptions(
            integration_responses=[
                apigw.IntegrationResponse(
                    status_code='200',
                    response_parameters=
                    {  # map the content type of the S3 object back to the HTTP response
                        "method.response.header.Content-Type":
                        "integration.response.header.Content-Type"
                    })
            ],
            credentials_role=role),
        # reference the bucket content we want to retrieve
        uri='arn:aws:apigateway:eu-west-1:s3:path/%s/index.html' %
        (content_bucket.bucket_name))
    # handle the GET request and map it to our new integration
    api.root.add_method("GET",
                        resource_integration,
                        method_responses=[
                            apigw.MethodResponse(
                                status_code='200',
                                response_parameters={
                                    "method.response.header.Content-Type":
                                    False
                                })
                        ])
def __init__(
    self,
    scope: Construct,
    id: str,
    *,
    vpc: IVpc,
    cluster: ICluster,
    service: IEc2Service,
    ecs_security_group: SecurityGroup,
    deployment: Deployment,
    **kwargs,
) -> None:
    """Create the reload Lambda for an ECS service.

    Deploys a Python Lambda (code under ``./lambdas/bananas-reload``)
    attached to the given VPC with both a dedicated security group and the
    ECS security group, grants it read-only ECS/EC2 describe+list
    permissions, and publishes a managed policy that allows invoking it.

    Args:
        scope: Parent construct.
        id: Logical ID for this construct.
        vpc: VPC the Lambda runs in.
        cluster: ECS cluster passed to the Lambda via environment.
        service: ECS service passed to the Lambda via environment.
        ecs_security_group: Extra security group attached to the Lambda.
        deployment: Deployment flavour, recorded as a tag.
        **kwargs: Forwarded to the base construct.
    """
    super().__init__(scope, id, **kwargs)

    # Tag everything under this construct for identification.
    for key, value in (
        ("Application", self.application_name),
        ("Deployment", deployment.value),
    ):
        Tags.of(self).add(key, value)

    # Dedicated security group for the Lambda's ENIs.
    lambda_sg = SecurityGroup(self, "LambdaSG", vpc=vpc)

    reload_func = Function(
        self,
        "ReloadLambda",
        code=Code.from_asset("./lambdas/bananas-reload"),
        handler="index.lambda_handler",
        runtime=Runtime.PYTHON_3_8,
        timeout=Duration.seconds(120),
        environment={
            "CLUSTER": cluster.cluster_arn,
            "SERVICE": service.service_arn,
        },
        vpc=vpc,
        security_groups=[lambda_sg, ecs_security_group],
        reserved_concurrent_executions=1,
    )

    # Read-only permissions the Lambda needs to inspect the cluster.
    describe_actions = [
        "ec2:DescribeInstances",
        "ecs:DescribeContainerInstances",
        "ecs:DescribeTasks",
        "ecs:ListContainerInstances",
        "ecs:ListServices",
        "ecs:ListTagsForResource",
        "ecs:ListTasks",
    ]
    reload_func.add_to_role_policy(
        PolicyStatement(actions=describe_actions, resources=["*"])
    )

    # Managed policy so other principals can be granted invoke rights.
    invoke_policy = ManagedPolicy(self, "Policy")
    invoke_policy.add_statements(
        PolicyStatement(
            actions=["lambda:InvokeFunction"],
            resources=[reload_func.function_arn],
        )
    )
def __init__(
    self,
    scope: Construct,
    id: str,
    cluster: ICluster,
    ecs_security_group: SecurityGroup,
    ecs_source_security_group: SecurityGroup,
    vpc: IVpc,
    **kwargs,
) -> None:
    """Stand up the NLB proxy layer: an auto-scaling group of nginx/SOCKS
    EC2 instances bootstrapped via user-data, plus lifecycle Lambdas and
    DNS records.

    This stack is a module-level singleton (guarded via the ``g_nlb``
    global). It provisions: a private hosted-zone lookup, S3-hosted
    user-data assets, an ASG of t3a.nano instances running nginx and a
    SOCKS proxy, ingress rules from the lifecycle Lambdas and ECS, and
    A/AAAA records initialized to localhost.

    Args:
        scope: Parent construct.
        id: Logical ID for this construct.
        cluster: ECS cluster whose name is baked into the instances.
        ecs_security_group: Security group added to the ASG instances.
        ecs_source_security_group: SG allowed to reach targets on UDP/8080.
        vpc: VPC hosting the ASG and Lambdas.
        **kwargs: Forwarded to the base construct.

    Raises:
        Exception: If another instance of this stack already exists.
    """
    super().__init__(scope, id, **kwargs)

    global g_nlb

    Tags.of(self).add("Stack", "Common-Nlb")

    # TODO -- You need to do some manual actions:
    # TODO -- 1) enable auto-assign IPv6 address on public subnets
    # TODO -- 2) add to the Outbound rules of "Live-Common-Nlb/ASG/InstanceSecurityGroup" the destination "::/0"

    # Internal DNS zone; must already exist in the account (context lookup).
    self.private_zone = HostedZone.from_lookup(
        self,
        "PrivateZone",
        domain_name="openttd.internal",
        private_zone=True,
    )

    user_data = UserData.for_linux(shebang="#!/bin/bash -ex")
    # Bundle the local user_data/nlb/ directory as an S3 asset the
    # instances download at boot.
    asset = Asset(self, "NLB", path="user_data/nlb/")
    user_data.add_commands(
        "echo 'Extracting user-data files'",
        "mkdir /nlb",
        "cd /nlb",
    )
    user_data.add_s3_download_command(
        bucket=asset.bucket,
        bucket_key=asset.s3_object_key,
        local_file="/nlb/files.zip",
    )
    user_data.add_commands("unzip files.zip", )

    # Drop region/cluster identifiers where the on-host scripts expect them.
    user_data.add_commands(
        "echo 'Setting up configuration'",
        f"echo '{self.region}' > /etc/.region",
        f"echo '{cluster.cluster_name}' > /etc/.cluster",
    )

    user_data.add_commands(
        "echo 'Installing nginx'",
        "amazon-linux-extras install epel",
        "yum install nginx -y",
        "cp /nlb/nginx.conf /etc/nginx/nginx.conf",
        "mkdir /etc/nginx/nlb.d",
    )
    user_data.add_commands(
        "echo 'Installing Python3'",
        "yum install python3 -y",
        "python3 -m venv /venv",
        "/venv/bin/pip install -r /nlb/requirements.txt",
    )
    # nginx config is generated on-host by the bundled nginx.py script,
    # then nginx is started.
    user_data.add_commands(
        "echo 'Generating nginx configuration'",
        "cd /etc/nginx/nlb.d",
        "/venv/bin/python /nlb/nginx.py",
        "systemctl start nginx",
    )
    user_data.add_commands(
        "echo 'Setting up SOCKS proxy'",
        "useradd pproxy",
        "cp /nlb/pproxy.service /etc/systemd/system/",
        "systemctl daemon-reload",
        "systemctl enable pproxy.service",
        "systemctl start pproxy.service",
    )

    asg = AutoScalingGroup(
        self,
        "ASG",
        vpc=vpc,
        instance_type=InstanceType("t3a.nano"),
        machine_image=MachineImage.latest_amazon_linux(
            generation=AmazonLinuxGeneration.AMAZON_LINUX_2),
        min_capacity=2,
        vpc_subnets=SubnetSelection(subnet_type=SubnetType.PUBLIC,
                                    one_per_az=True),
        user_data=user_data,
        health_check=HealthCheck.elb(grace=Duration.seconds(0)),
    )
    asg.add_security_group(ecs_security_group)
    # SSM access for shell-less administration of the instances.
    asg.role.add_managed_policy(
        ManagedPolicy.from_aws_managed_policy_name(
            "AmazonSSMManagedInstanceCore"))
    asset.grant_read(asg.role)

    # Read-only ECS/EC2 introspection rights, presumably used by the
    # on-host nginx.py generator — TODO confirm against user_data scripts.
    policy = ManagedPolicy(self, "Policy")
    policy_statement = PolicyStatement(
        actions=[
            "ec2:DescribeInstances",
            "ecs:DescribeContainerInstances",
            "ecs:DescribeTasks",
            "ecs:ListContainerInstances",
            "ecs:ListServices",
            "ecs:ListTagsForResource",
            "ecs:ListTasks",
        ],
        resources=["*"],
    )
    policy.add_statements(policy_statement)
    asg.role.add_managed_policy(policy)

    # We could also make an additional security-group and add that to
    # the ASG, but it keeps adding up. This makes it a tiny bit
    # easier to get an overview what traffic is allowed from the
    # console on AWS.
    # NOTE(review): relies on the ASG's auto-created SG being child [0];
    # `assert` is stripped under -O, so this guard is best-effort only.
    assert isinstance(asg.node.children[0], SecurityGroup)
    self.security_group = asg.node.children[0]

    # NOTE(review): `listener_https` is not defined in this scope — it is
    # presumably a module-level global set up elsewhere; verify.
    listener_https.add_targets(
        subdomain_name=self.admin_subdomain_name,
        port=80,
        target=asg,
        priority=2,
    )

    # Create a Security Group so the lambdas can access the EC2.
    # This is needed to check if the EC2 instance is fully booted.
    lambda_security_group = SecurityGroup(
        self,
        "LambdaSG",
        vpc=vpc,
    )

    self.security_group.add_ingress_rule(
        peer=lambda_security_group,
        connection=Port.tcp(80),
        description="Lambda to target",
    )
    self.security_group.add_ingress_rule(
        peer=ecs_source_security_group,
        connection=Port.udp(8080),
        description="ECS to target",
    )

    self.create_ecs_lambda(
        cluster=cluster,
        auto_scaling_group=asg,
    )
    # Lifecycle hooks: a launch hook with a generous timeout (instance
    # bootstrap), and a short termination hook.
    self.create_asg_lambda(
        lifecycle_transition=LifecycleTransition.INSTANCE_LAUNCHING,
        timeout=Duration.seconds(180),
        vpc=vpc,
        security_group=lambda_security_group,
        auto_scaling_group=asg,
    )
    self.create_asg_lambda(
        lifecycle_transition=LifecycleTransition.INSTANCE_TERMINATING,
        timeout=Duration.seconds(30),
        vpc=vpc,
        security_group=lambda_security_group,
        auto_scaling_group=asg,
    )

    # Initialize the NLB record on localhost, as we need to be able to
    # reference it for other entries to work correctly.
    # NOTE(review): `dns` is not defined in this scope — presumably a
    # module-level import providing get_hosted_zone(); verify.
    ARecord(
        self,
        "ARecord",
        target=RecordTarget.from_ip_addresses("127.0.0.1"),
        zone=dns.get_hosted_zone(),
        record_name=self.subdomain_name,
        ttl=Duration.seconds(60),
    )
    AaaaRecord(
        self,
        "AAAARecord",
        target=RecordTarget.from_ip_addresses("::1"),
        zone=dns.get_hosted_zone(),
        record_name=self.subdomain_name,
        ttl=Duration.seconds(60),
    )
    # To make things a bit easier, also alias to staging.
    self.create_alias(self, "nlb.staging")

    # Create a record for the internal DNS
    ARecord(
        self,
        "APrivateRecord",
        target=RecordTarget.from_ip_addresses("127.0.0.1"),
        zone=self.private_zone,
        record_name=self.subdomain_name,
        ttl=Duration.seconds(60),
    )

    # NOTE(review): this singleton guard runs after all resources are
    # created; checking at the top of __init__ would fail faster.
    if g_nlb is not None:
        raise Exception("Only a single NlbStack instance can exist")
    g_nlb = self