def __init__(self, scope: cdk.Stack, construct_id: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    # Create the VPC resource.
    self._vpc = Vpc(self, "MyVPC", cidr="10.10.0.0/16")

    # Create a Security Group within the VPC that is used to allow
    # management traffic from designated jump hosts.
    self._sg = SecurityGroup(
        self, "MySG",
        vpc=self._vpc,
        allow_all_outbound=False,
        description="Management traffic from jump boxes",
        security_group_name="jumpbox-mgmt-traffic")

    # Add ingress rules to the Security Group for the jump host
    # 10.255.0.10 to TCP/22 and TCP/3389.
    self._sg.add_ingress_rule(
        peer=Peer.ipv4("10.255.0.10/32"),
        connection=Port(
            protocol=Protocol.TCP,
            string_representation="host1",
            from_port=22,
            to_port=22))
    self._sg.add_ingress_rule(
        peer=Peer.ipv4("10.255.0.10/32"),
        connection=Port(
            protocol=Protocol.TCP,
            string_representation="host1",
            from_port=3389,
            to_port=3389))
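
# A minimal sketch of the imports and app wiring the example above assumes;
# the stack class name JumpboxStack, the construct id, and the CDK v1-style
# import paths are assumptions, not part of the original snippet.
from aws_cdk import core as cdk
from aws_cdk.aws_ec2 import Vpc, SecurityGroup, Peer, Port, Protocol

class JumpboxStack(cdk.Stack):
    # __init__ as defined above: creates the VPC, the security group,
    # and the two jump-host ingress rules.
    ...

app = cdk.App()
JumpboxStack(app, "JumpboxStack")  # hypothetical construct id
app.synth()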
def get_web_security_group(self, vpc):
    # Create a Security Group that allows all outbound traffic and opens
    # SSH, HTTP, and HTTPS to any IPv4 or IPv6 address.
    security_group = SecurityGroup(
        self._stack, 'obm_web',
        vpc=vpc,
        allow_all_outbound=True,
    )
    for port_number in [SSH_PORT, HTTP_PORT, HTTPS_PORT]:
        port = Port(from_port=port_number,
                    to_port=port_number,
                    protocol=Protocol.TCP,
                    string_representation=f"Port {port_number}")
        security_group.add_ingress_rule(peer=Peer.any_ipv4(), connection=port)
        security_group.add_ingress_rule(peer=Peer.any_ipv6(), connection=port)
    self._tag_it(security_group)
    return security_group
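
# The explicit Port(...) construction above can be written more compactly
# with the Port.tcp() helper, which fills in the protocol and string
# representation. A sketch under the same assumptions as the example above;
# the port constant values here are illustrative.
SSH_PORT, HTTP_PORT, HTTPS_PORT = 22, 80, 443  # assumed values

for port_number in (SSH_PORT, HTTP_PORT, HTTPS_PORT):
    security_group.add_ingress_rule(Peer.any_ipv4(), Port.tcp(port_number))
    security_group.add_ingress_rule(Peer.any_ipv6(), Port.tcp(port_number))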
def create_sg(self, jump_host, mgmt_ports):
    # Create a Security Group within the VPC that is used to allow
    # management traffic from designated jump hosts.
    self._sg = SecurityGroup(
        self, "MySG",
        vpc=self._vpc,
        allow_all_outbound=False,
        description="Management traffic from jump boxes",
        security_group_name="jumpbox-mgmt-traffic")

    # Add ingress rules to the Security Group
    for port in mgmt_ports:
        self._sg.add_ingress_rule(
            peer=Peer.ipv4(jump_host),
            connection=Port(
                protocol=Protocol.TCP,
                string_representation="jump",
                from_port=int(port),
                to_port=int(port)))
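
# A hypothetical call site for create_sg; the jump-host CIDR and port list
# below are illustrative, assuming self._vpc was created as in the first
# example.
self.create_sg(jump_host="10.255.0.10/32", mgmt_ports=["22", "3389"])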
def __init__(self, scope: core.Construct, id: str, deploy_env: str,
             vpc: aws_ec2.Vpc, db_redis_stack: RdsElasticacheEfsStack,
             config: dict, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)
    self.config = config
    self.deploy_env = deploy_env
    self.db_port = DB_PORT

    # Cannot map volumes to Fargate task defs yet, so this is done via
    # Boto3 since CDK does not support it yet:
    # https://github.com/aws/containers-roadmap/issues/825
    # self.efs_file_system_id = db_redis_stack.efs_file_system_id

    cluster_name = get_cluster_name(deploy_env)
    self.cluster = ecs.Cluster(self, cluster_name,
                               cluster_name=cluster_name, vpc=vpc)

    pwd_secret = ecs.Secret.from_ssm_parameter(
        StringParameter.from_secure_string_parameter_attributes(
            self, f"dbpwd-{deploy_env}",
            version=1,
            parameter_name="postgres_pwd"))
    self.secrets = {"POSTGRES_PASSWORD": pwd_secret}

    environment = {
        "EXECUTOR": "Celery",
        "POSTGRES_HOST": db_redis_stack.db_host,
        "POSTGRES_PORT": str(self.db_port),
        "POSTGRES_DB": "airflow",
        "POSTGRES_USER": self.config["dbadmin"],
        "REDIS_HOST": db_redis_stack.redis_host,
        "VISIBILITY_TIMEOUT": str(
            self.config["celery_broker_visibility_timeout"])
    }

    image_asset = DockerImageAsset(self, "AirflowImage",
                                   directory="build",
                                   repository_name=config["ecr_repo_name"])
    self.image = ecs.ContainerImage.from_docker_image_asset(image_asset)

    # Web server - this initializes the db, so it must happen first.
    self.web_service = self.airflow_web_service(environment)

    # https://github.com/aws/aws-cdk/issues/1654
    self.web_service_sg().connections.allow_to_default_port(
        db_redis_stack.postgres_db, 'allow PG')

    redis_port_info = Port(protocol=Protocol.TCP,
                           string_representation="allow to redis",
                           from_port=REDIS_PORT, to_port=REDIS_PORT)
    worker_port_info = Port(protocol=Protocol.TCP,
                            string_representation="allow to worker",
                            from_port=AIRFLOW_WORKER_PORT,
                            to_port=AIRFLOW_WORKER_PORT)
    redis_sg = SecurityGroup.from_security_group_id(
        self, id=f"Redis-SG-{deploy_env}",
        security_group_id=db_redis_stack.redis.vpc_security_group_ids[0])
    bastion_sg = db_redis_stack.bastion.connections.security_groups[0]

    self.web_service_sg().connections.allow_to(redis_sg, redis_port_info,
                                               'allow Redis')
    self.web_service_sg().connections.allow_to_default_port(
        db_redis_stack.efs_file_system)

    # Scheduler.
    self.scheduler_service = self.create_scheduler_ecs_service(environment)
    # Worker.
    self.worker_service = self.worker_service(environment)

    self.scheduler_sg().connections.allow_to_default_port(
        db_redis_stack.postgres_db, 'allow PG')
    self.scheduler_sg().connections.allow_to(redis_sg, redis_port_info,
                                             'allow Redis')
    self.scheduler_sg().connections.allow_to_default_port(
        db_redis_stack.efs_file_system)
    self.worker_sg().connections.allow_to_default_port(
        db_redis_stack.postgres_db, 'allow PG')
    self.worker_sg().connections.allow_to(redis_sg, redis_port_info,
                                          'allow Redis')
    self.worker_sg().connections.allow_to_default_port(
        db_redis_stack.efs_file_system)

    # When you start an Airflow worker, Airflow starts a tiny web server
    # subprocess to serve the worker's local log files to the main Airflow
    # web server, which then builds pages and sends them to users. This
    # defines the port on which the logs are served. It needs to be unused
    # and visible from the main web server, which connects into the workers.
    self.web_service_sg().connections.allow_to(self.worker_sg(),
                                               worker_port_info,
                                               'web service to worker')
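
# The example calls accessors such as web_service_sg(), scheduler_sg(), and
# worker_sg() that are defined elsewhere in the class. A minimal sketch of
# what such an accessor might look like, assuming each ECS service exposes
# its security groups through its Connections object; the body below is an
# assumption, not the original implementation.
def web_service_sg(self):
    # Return the first security group attached to the web service's
    # underlying Connections object.
    return self.web_service.connections.security_groups[0]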