class EcsStack(Stack):
    def __init__(self, scope: Construct, id: str, vpc: IVpc, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        Tags.of(self).add("Stack", "Common-Ecs")

        self._cluster = Cluster(
            self,
            "Cluster",
            vpc=vpc,
        )

        asg = AutoScalingGroup(
            self,
            "ClusterASG",
            vpc=vpc,
            instance_type=InstanceType("t3a.small"),
            machine_image=EcsOptimizedImage.amazon_linux2(),
            min_capacity=4,
        )
        self._cluster.add_auto_scaling_group(asg)

        # Create a SecurityGroup the NLB can use to allow traffic from the
        # NLB to us. This avoids a cyclic dependency.
        self.security_group = SecurityGroup(
            self,
            "SecurityGroup",
            vpc=vpc,
            allow_all_outbound=False,
        )

        # Only use "source_security_group" to check whether flows come from
        # ECS. Do not use it to allow traffic into ECS; use "security_group"
        # for that.
        assert isinstance(asg.node.children[0], SecurityGroup)
        self.source_security_group = asg.node.children[0]

        # We could also create an additional security group and add that to
        # the ASG, but it keeps adding up. Reusing the ASG's own group makes
        # it a tiny bit easier to get an overview of the allowed traffic in
        # the AWS console. The port range is the ephemeral range used by ECS
        # dynamic port mapping.
        asg.node.children[0].add_ingress_rule(
            peer=self.security_group,
            connection=Port.tcp_range(32768, 65535),
            description="NLB-self to target",
        )
        asg.node.children[0].add_ingress_rule(
            peer=self.security_group,
            connection=Port.udp_range(32768, 65535),
            description="NLB-self to target (UDP)",
        )

    @property
    def cluster(self) -> ICluster:
        return self._cluster

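# A minimal usage sketch for the stack above, assuming it is instantiated
# from a CDK app next to a stack that owns the VPC (App comes from the CDK
# core package; VpcStack and its `vpc` attribute are illustrative names, not
# from the original code):
app = App()
vpc_stack = VpcStack(app, "Vpc")
EcsStack(app, "Ecs", vpc=vpc_stack.vpc)
app.synth()
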
def __init__(self, scope: Construct, id: str, envs: EnvSettings):
    super().__init__(scope, id)

    self.ecr = BaseECR(self, "ECR", envs)
    self.key = BaseKMS(self, "KMS", envs)
    self.vpc = Vpc(self, "Vpc", nat_gateways=1)
    self.cluster = Cluster(self, "WorkersCluster",
                           cluster_name="schema-ecs-cluster",
                           vpc=self.vpc)
    self.db = DatabaseInstance(
        self,
        "DataBase",
        database_name=envs.data_base_name,
        engine=DatabaseInstanceEngine.POSTGRES,
        storage_encrypted=True,
        allocated_storage=50,
        instance_type=InstanceType.of(InstanceClass.BURSTABLE2, InstanceSize.SMALL),
        vpc=self.vpc,
    )

    if self.db.secret:
        CfnOutput(
            self,
            id="DbSecretOutput",
            export_name=self.get_database_secret_arn_output_export_name(envs),
            value=self.db.secret.secret_arn,
        )

def get_cluster(self, vpc):
    cluster = Cluster(self._stack, self._name, vpc=vpc)
    Tags.of(cluster).add('hostedZoneId', self._config.hosted_zone_id)
    Tags.of(cluster).add('domain', self._config.domain)
    self._export('ClusterName', cluster.cluster_name)
    self._tag_it(cluster)
    return cluster

def get_cluster(scope: Construct, vpc: Vpc) -> ICluster:
    # from_cluster_attributes returns an ICluster proxy rather than a
    # concrete Cluster, so the annotation reflects that.
    config = get_cluster_config()
    stack_name = config.stack_name
    return Cluster.from_cluster_attributes(
        scope,
        'cluster',
        cluster_name=Fn.import_value(stack_name + 'ClusterName'),
        vpc=vpc,
        has_ec2_capacity=False,
        security_groups=[],
    )

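# The ICluster returned above is only a reference: has_ec2_capacity=False
# tells the CDK there are no container instances, so EC2-backed services
# cannot be placed on it, but Fargate services can. A minimal consumption
# sketch (scope, vpc, and task_definition are assumed to exist in the
# caller; the names are illustrative, not from the original code):
FargateService(
    scope,
    'ImportedClusterService',
    cluster=get_cluster(scope, vpc),
    task_definition=task_definition,
)
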
def __init__(self, scope: Construct, _id: str, **kwargs) -> None:
    super().__init__(scope, _id, **kwargs)

    task_definition = FargateTaskDefinition(
        self,
        'TaskDefinition',
        cpu=256,
        memory_limit_mib=512,
        execution_role=Role(
            self,
            'ExecutionRole',
            assumed_by=cast(IPrincipal, ServicePrincipal('ecs-tasks.amazonaws.com'))
        ),
        task_role=Role(
            self,
            'TaskRole',
            assumed_by=cast(IPrincipal, ServicePrincipal('ecs-tasks.amazonaws.com')),
            managed_policies=[
                ManagedPolicy.from_aws_managed_policy_name('AmazonSESFullAccess')
            ]
        )
    )
    # Note: environ.get returns None for unset variables, so these must be
    # exported in the shell before synthesis.
    task_definition.add_container(
        'Container',
        image=ContainerImage.from_asset(
            getcwd(),
            file='Dockerfile',
            repository_name='jqbx-bot',
            exclude=['cdk.out']
        ),
        command=['pipenv', 'run', 'python', '-u', '-m', 'src.main'],
        environment={
            'SPOTIFY_USER_ID': environ.get('SPOTIFY_USER_ID'),
            'JQBX_ROOM_ID': environ.get('JQBX_ROOM_ID'),
            'JQBX_BOT_DISPLAY_NAME': environ.get('JQBX_BOT_DISPLAY_NAME'),
            'JQBX_BOT_IMAGE_URL': environ.get('JQBX_BOT_IMAGE_URL'),
            'DATA_SERVICE_BASE_URL': environ.get('DATA_SERVICE_BASE_URL')
        },
        logging=AwsLogDriver(
            stream_prefix='jqbx-bot',
            log_group=LogGroup(self, 'LogGroup')
        )
    )
    cluster = Cluster(self, '%sCluster' % _id)
    FargateService(
        self,
        '%sService' % _id,
        cluster=cluster,
        task_definition=task_definition,
        desired_count=1
    )

def __init__(self, scope: core.Construct, id: str, vpc, ecr_repo, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    ecs_cluster = Cluster(self, "chatCluster", cluster_name="chat-cluster", vpc=vpc)

    # ApplicationLoadBalancedFargateService accepts either task_definition or
    # task_image_options, not both; the pattern builds the task definition
    # from the image options, so the separate (empty) FargateTaskDefinition
    # the original created is not needed and would fail synthesis.
    fargate_service = ApplicationLoadBalancedFargateService(
        self,
        "FargateService",
        cluster=ecs_cluster,
        task_image_options=ApplicationLoadBalancedTaskImageOptions(
            image=ContainerImage.from_ecr_repository(ecr_repo)
        ),
        desired_count=3,
        service_name="chat-service",
        memory_limit_mib=2048,
        cpu=512,
    )
    scalable_target = fargate_service.service.auto_scale_task_count(
        min_capacity=2, max_capacity=5
    )
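    # auto_scale_task_count() only registers a scalable target; the task
    # count never changes until a policy is attached to it. A minimal sketch,
    # assuming CPU-based target tracking is acceptable (the 70% target is an
    # illustrative value, not from the original code):
    scalable_target.scale_on_cpu_utilization(
        "ChatCpuScaling",
        target_utilization_percent=70,
    )
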
def __init__(self, scope: cdk.Construct, construct_id: str, config, vpc: IVpc,
             instance: IInstance, neo4j_user_secret: ISecret, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    # Encryption (S3_MANAGED would otherwise be used) is disabled since it is
    # not strictly required here and it conflicts with the SCP guardrails set
    # by Control Tower on the Audit account.
    bucket = Bucket(self, "s3-bucket-altimeter",
        bucket_name=config["s3_bucket"],
        encryption=BucketEncryption.UNENCRYPTED,
        block_public_access=BlockPublicAccess.BLOCK_ALL
    )

    cluster = Cluster(self, "ecs-cluster-altimeter",
        cluster_name="ecsclstr-altimeter--default",
        vpc=vpc
    )

    # Within the account where the scanner runs, the task role is (partially)
    # used for scanning resources, rather than the altimeter-scanner-access
    # role.
    task_role = Role(self, "iam-role-altimeter-task-role",
        assumed_by=ServicePrincipal("ecs-tasks.amazonaws.com"),
        managed_policies=[
            ManagedPolicy.from_aws_managed_policy_name('SecurityAudit'),
            ManagedPolicy.from_aws_managed_policy_name('job-function/ViewOnlyAccess')
        ]
    )

    task_definition = FargateTaskDefinition(self, "ecs-fgtd-altimeter",
        task_role=task_role,
        memory_limit_mib=self.MEMORY_LIMIT,
        cpu=self.CPU
    )

    docker_path = os.path.join(os.path.curdir, "..")
    image_asset = DockerImageAsset(self, 'ecr-assets-dia-altimeter',
        directory=docker_path,
        file="scanner.Dockerfile"
    )

    task_definition.add_container("ecs-container-altimeter",
        image=ContainerImage.from_docker_image_asset(image_asset),
        environment={
            "CONFIG_PATH": config["altimeter_config_path"],
            "S3_BUCKET": config["s3_bucket"]
        },
        logging=AwsLogDriver(
            stream_prefix='altimeter',
            log_retention=RetentionDays.TWO_WEEKS
        )
    )

    task_definition.add_to_task_role_policy(PolicyStatement(
        resources=["arn:aws:iam::*:role/" + config["account_execution_role"]],
        actions=['sts:AssumeRole']
    ))
    task_definition.add_to_task_role_policy(PolicyStatement(
        resources=[
            "arn:aws:s3:::" + config["s3_bucket"],
            "arn:aws:s3:::" + config["s3_bucket"] + "/*"
        ],
        actions=["s3:GetObject*",
                 "s3:GetBucket*",
                 "s3:List*",
                 "s3:DeleteObject*",
                 "s3:PutObject",
                 "s3:Abort*",
                 "s3:PutObjectTagging"]
    ))
    # Grant the ability to record stdout to CloudWatch Logs.
    # TODO: Refine to the task's own log group instead of "*".
    task_definition.add_to_task_role_policy(PolicyStatement(
        resources=["*"],
        actions=['logs:*']
    ))

    # Trigger the task every 24 hours.
    Rule(self, "events-rule-altimeter-daily-scan",
        rule_name="evrule--altimeter-daily-scan",
        schedule=Schedule.cron(hour="0", minute="0"),
        description="Daily altimeter scan",
        targets=[EcsTask(
            task_definition=task_definition,
            cluster=cluster,
            subnet_selection=SubnetSelection(subnet_type=SubnetType.PRIVATE)
        )]
    )

    # Trigger the task manually via a custom event.
    Rule(self, "events-rule-altimeter-manual-scan",
        rule_name="evrule--altimeter-manual-scan",
        event_pattern=EventPattern(source=['altimeter']),
        description="Manual altimeter scan",
        targets=[EcsTask(
            task_definition=task_definition,
            cluster=cluster,
            subnet_selection=SubnetSelection(subnet_type=SubnetType.PRIVATE)
        )]
    )

    # Don't put the Neo4j importer lambda in a separate stack: that causes a
    # circular reference with the S3 event source, and an imported bucket
    # cannot be used as an event source (it needs a Bucket, not an IBucket).
    neo4j_importer_function = PythonFunction(self, 'lambda-function-neo4j-importer',
        function_name="function-altimeter--neo4j-importer",
        entry="../neo4j-importer",
        index="app.py",
        handler="lambda_handler",
        runtime=Runtime.PYTHON_3_8,
        memory_size=256,
        timeout=cdk.Duration.seconds(60),
        vpc=vpc,
        vpc_subnets=SubnetSelection(
            subnets=vpc.select_subnets(subnet_group_name='Private').subnets
        ),
        environment={
            "neo4j_address": instance.instance_private_ip,
            "neo4j_user_secret_name": neo4j_user_secret.secret_name
        }
    )

    neo4j_importer_function.add_event_source(
        S3EventSource(bucket,
            events=[EventType.OBJECT_CREATED, EventType.OBJECT_REMOVED],
            filters=[{"prefix": "raw/", "suffix": ".rdf"}]
        )
    )

    # Grant the lambda read/write access to the S3 bucket for reading raw
    # RDF, writing prepared RDF, and generating signed URIs.
    bucket.grant_read_write(neo4j_importer_function.role)
    # Grant the lambda read access to the Neo4j user secret.
    neo4j_user_secret.grant_read(neo4j_importer_function.role)

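# The manual-scan rule above matches any event whose source is "altimeter".
# A minimal sketch of triggering it from outside the stack, assuming boto3
# and credentials that allow events:PutEvents (not part of the original
# code):
import boto3

events = boto3.client("events")
events.put_events(
    Entries=[{
        "Source": "altimeter",
        "DetailType": "manual-scan",  # illustrative; the rule matches on Source only
        "Detail": "{}",
    }]
)
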
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    vpc = Vpc(self, "MyVpc", max_azs=2)
    ecs_cluster = Cluster(self, 'FargateCluster', vpc=vpc)

    alb = ApplicationLoadBalancer(self, 'EcsLb', vpc=vpc, internet_facing=True)
    listener = alb.add_listener('EcsListener', port=80)
    listener.add_fixed_response('Default-Fix', status_code='404')
    # Force the fixed 404 response to be the default action on the
    # underlying CfnListener (the property is default_actions, plural).
    listener.node.default_child.default_actions = [{
        "type": "fixed-response",
        "fixedResponseConfig": {
            "statusCode": "404"
        }
    }]

    website_bucket = Bucket(self, 'PetclinicWebsite',
                            website_index_document='index.html',
                            public_read_access=True,
                            removal_policy=core.RemovalPolicy.DESTROY)
    deployment = BucketDeployment(
        self,
        'PetclinicDeployWebsite',
        sources=[Source.asset('./spring-petclinic-static')],
        destination_bucket=website_bucket,
        retain_on_delete=False
        # destination_key_prefix='web/static'
    )

    # Policy for the custom resource that modifies config.js.
    modify_policy = [
        PolicyStatement(actions=[
            "s3:PutObject", "s3:PutObjectAcl", "s3:PutObjectVersionAcl",
            "s3:GetObject"
        ],
                        effect=Effect.ALLOW,
                        resources=[website_bucket.bucket_arn + "/*"]),
        PolicyStatement(actions=["s3:ListBucket"],
                        effect=Effect.ALLOW,
                        resources=[website_bucket.bucket_arn]),
        PolicyStatement(actions=["dynamodb:*"],
                        effect=Effect.ALLOW,
                        resources=[
                            "arn:aws:dynamodb:" + self.region + ":" +
                            self.account + ":*"
                        ])
    ]

    with open("custom-resource-code/init.py", encoding="utf-8") as fp:
        code_body = fp.read()

    dynamodb_tables = []
    for s in ['customers', 'vets', 'visits']:
        table = Table(
            self,
            s.capitalize() + 'Table',
            partition_key={
                'name': 'id',
                'type': AttributeType.STRING
            },
            removal_policy=core.RemovalPolicy.DESTROY,
            read_capacity=5,
            write_capacity=5,
        )
        dynamodb_tables.append(table.table_name)

        asset = DockerImageAsset(
            self,
            'spring-petclinic-' + s,
            repository_name=self.stack_name + '-' + s,
            directory='./spring-petclinic-serverless/spring-petclinic-' + s + '-serverless',
            build_args={
                'JAR_FILE': 'spring-petclinic-' + s + '-serverless-2.0.7.jar'
            })

        ecs_task = FargateTaskDefinition(self,
                                         'TaskDef-Fargate-' + s,
                                         memory_limit_mib=512,
                                         cpu=256)
        ecs_task.add_to_task_role_policy(
            PolicyStatement(actions=["dynamodb:*"],
                            effect=Effect.ALLOW,
                            resources=[table.table_arn]))
        ecs_task.add_to_task_role_policy(
            PolicyStatement(actions=['xray:*'],
                            effect=Effect.ALLOW,
                            resources=['*']))

        env = {
            'DYNAMODB_TABLE_NAME': table.table_name,
            'SERVER_SERVLET_CONTEXT_PATH': '/api/' + s.rstrip('s')
        }
        ecs_container = ecs_task.add_container(
            'Container-' + s,
            image=ContainerImage.from_docker_image_asset(asset),
            logging=LogDriver.aws_logs(stream_prefix=s),
            environment=env)
        ecs_container.add_port_mappings(PortMapping(container_port=8080))

        # Sidecar container for X-Ray.
        ecs_sidecar_container = ecs_task.add_container(
            'Sidecar-Xray-' + s,
            image=ContainerImage.from_registry('amazon/aws-xray-daemon'))
        ecs_sidecar_container.add_port_mappings(
            PortMapping(container_port=2000, protocol=Protocol.UDP))

        ecs_service = FargateService(self,
                                     'FargateService-' + s,
                                     cluster=ecs_cluster,
                                     service_name='spring-petclinic-' + s,
                                     desired_count=2,
                                     task_definition=ecs_task)

        pattern = '/api/' + s.rstrip('s') + '/*'
        # Listener rule priorities must be unique; deriving them from
        # randint is fragile and can collide across services.
        priority = randint(1, 10) * len(s)
        check = HealthCheck(
            path='/api/' + s.rstrip('s') + '/manage',
            healthy_threshold_count=2,
            unhealthy_threshold_count=3,
        )
        listener.add_targets('ECS-' + s,
                             path_pattern=pattern,
                             priority=priority,
                             port=80,
                             targets=[ecs_service],
                             health_check=check)

    # Modify config.js in the website bucket with a CloudFormation custom
    # resource.
    resource = CustomResource(
        self,
        "S3ModifyCustomResource",
        provider=CustomResourceProvider.lambda_(
            SingletonFunction(self,
                              "CustomResourceSingleton",
                              uuid="f7d4f730-4ee1-11e8-9c2d-fa7ae01bbebc",
                              code=InlineCode(code_body),
                              handler="index.handler",
                              timeout=core.Duration.seconds(300),
                              runtime=Runtime.PYTHON_3_7,
                              initial_policy=modify_policy)),
        properties={
            "Bucket": website_bucket.bucket_name,
            "InvokeUrl": 'http://' + alb.load_balancer_dns_name + '/',
            "DynamoDBTables": dynamodb_tables
        })

    core.CfnOutput(self,
                   "FargateALBUrl",
                   export_name="FargateALBUrl",
                   value=alb.load_balancer_dns_name)
    core.CfnOutput(self,
                   "FargatePetclinicWebsiteUrl",
                   export_name="FargatePetclinicWebsiteUrl",
                   value=website_bucket.bucket_website_url)

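# For reference, a handler like custom-resource-code/init.py must answer the
# CloudFormation callback URL or the stack will hang until timeout. A minimal
# sketch of the expected shape, assuming the real handler rewrites config.js
# in the bucket (the body below is illustrative, not the original init.py):
import json
import urllib.request


def handler(event, context):
    status = "SUCCESS"
    try:
        if event["RequestType"] in ("Create", "Update"):
            props = event["ResourceProperties"]
            # ... write config.js referencing props["InvokeUrl"] into
            # props["Bucket"] here ...
    except Exception:
        status = "FAILED"
    # Report the result back to CloudFormation via the pre-signed URL.
    body = json.dumps({
        "Status": status,
        "PhysicalResourceId": context.log_stream_name,
        "StackId": event["StackId"],
        "RequestId": event["RequestId"],
        "LogicalResourceId": event["LogicalResourceId"],
    }).encode()
    req = urllib.request.Request(event["ResponseURL"], data=body,
                                 method="PUT", headers={"Content-Type": ""})
    urllib.request.urlopen(req)
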
def __init__(
    self,
    scope: App,
    id: str,
    envs: EnvSettings,
    components: ComponentsStack,
    base_resources: BaseResources,
):
    super().__init__(scope, id)

    self.db_secret_arn = Fn.import_value(
        BaseResources.get_database_secret_arn_output_export_name(envs))
    self.job_processing_queues = components.data_processing_queues
    self.vpc = base_resources.vpc
    self.db = base_resources.db

    self.app_bucket = Bucket(self, "App", versioned=True)
    if self.app_bucket.bucket_arn:
        CfnOutput(
            self,
            id="AppBucketOutput",
            export_name=self.get_app_bucket_arn_output_export_name(envs),
            value=self.app_bucket.bucket_arn,
        )

    self.pages_bucket = Bucket(self, "Pages", public_read_access=True)

    self.domain_name = StringParameter.from_string_parameter_name(
        self, "DomainNameParameter",
        string_parameter_name="/schema-cms-app/DOMAIN_NAME").string_value
    self.certificate_arn = StringParameter.from_string_parameter_name(
        self, "CertificateArnParameter",
        string_parameter_name="/schema-cms-app/CERTIFICATE_ARN").string_value

    django_secret = Secret(self, "DjangoSecretKey",
                           secret_name="SCHEMA_CMS_DJANGO_SECRET_KEY")
    lambda_auth_token_secret = Secret(
        self, "LambdaAuthToken", secret_name="SCHEMA_CMS_LAMBDA_AUTH_TOKEN")

    if lambda_auth_token_secret.secret_arn:
        CfnOutput(
            self,
            id="lambdaAuthTokenArnOutput",
            export_name=self.get_lambda_auth_token_arn_output_export_name(envs),
            value=lambda_auth_token_secret.secret_arn,
        )

    self.django_secret_key = EcsSecret.from_secrets_manager(django_secret)
    self.lambda_auth_token = EcsSecret.from_secrets_manager(lambda_auth_token_secret)

    tag_from_context = self.node.try_get_context("app_image_tag")
    tag = tag_from_context if tag_from_context != "undefined" else None

    api_image = ContainerImage.from_ecr_repository(
        repository=Repository.from_repository_name(
            self, id="BackendRepository",
            repository_name=BaseECR.get_backend_repository_name(envs)),
        tag=tag,
    )
    nginx_image = ContainerImage.from_ecr_repository(
        repository=Repository.from_repository_name(
            self, id="NginxRepository",
            repository_name=BaseECR.get_nginx_repository_name(envs)),
        tag=tag,
    )

    self.api = ApplicationLoadBalancedFargateService(
        self,
        "ApiService",
        service_name=f"{envs.project_name}-api-service",
        cluster=Cluster.from_cluster_attributes(
            self,
            id="WorkersCluster",
            cluster_name="schema-ecs-cluster",
            vpc=self.vpc,
            security_groups=[],
        ),
        task_image_options=ApplicationLoadBalancedTaskImageOptions(
            image=nginx_image,
            container_name="nginx",
            container_port=80,
            enable_logging=True,
        ),
        desired_count=1,
        cpu=512,
        memory_limit_mib=1024,
        certificate=Certificate.from_certificate_arn(
            self, "Cert", certificate_arn=self.certificate_arn),
        domain_name=self.domain_name,
        domain_zone=PrivateHostedZone(
            self,
            "zone",
            vpc=self.vpc,
            zone_name=self.domain_name,
        ),
    )

    self.api.task_definition.add_container(
        "backend",
        image=api_image,
        command=[
            "sh", "-c",
            "/bin/chamber exec $CHAMBER_SERVICE_NAME -- ./scripts/run.sh"
        ],
        logging=AwsLogDriver(stream_prefix="backend-container"),
        environment={
            "POSTGRES_DB": envs.data_base_name,
            "AWS_STORAGE_BUCKET_NAME": self.app_bucket.bucket_name,
            "AWS_STORAGE_PAGES_BUCKET_NAME": self.pages_bucket.bucket_name,
            "SQS_WORKER_QUEUE_URL": self.job_processing_queues[0].queue_url,
            "SQS_WORKER_EXT_QUEUE_URL": self.job_processing_queues[1].queue_url,
            "SQS_WORKER_MAX_QUEUE_URL": self.job_processing_queues[2].queue_url,
            "CHAMBER_SERVICE_NAME": "schema-cms-app",
            "CHAMBER_KMS_KEY_ALIAS": envs.project_name,
        },
        secrets={
            "DB_CONNECTION": EcsSecret.from_secrets_manager(
                Secret.from_secret_arn(self, id="DbSecret",
                                       secret_arn=self.db_secret_arn)),
            "DJANGO_SECRET_KEY": self.django_secret_key,
            "LAMBDA_AUTH_TOKEN": self.lambda_auth_token,
        },
        cpu=512,
        memory_limit_mib=1024,
    )

    self.django_secret_key.grant_read(self.api.service.task_definition.task_role)
    self.app_bucket.grant_read_write(self.api.service.task_definition.task_role)
    self.pages_bucket.grant_read_write(self.api.service.task_definition.task_role)
    for queue in self.job_processing_queues:
        queue.grant_send_messages(self.api.service.task_definition.task_role)

    self.api.service.connections.allow_to(self.db.connections, Port.tcp(5432))

    self.api.task_definition.add_to_task_role_policy(
        PolicyStatement(
            actions=["ses:SendRawEmail", "ses:SendBulkTemplatedEmail"],
            resources=["*"],
        ))
    self.api.task_definition.add_to_task_role_policy(
        PolicyStatement(
            actions=["kms:Get*", "kms:Describe*", "kms:List*", "kms:Decrypt"],
            resources=[
                Fn.import_value(BaseKMS.get_kms_arn_output_export_name(envs))
            ],
        ))
    self.api.task_definition.add_to_task_role_policy(
        PolicyStatement(actions=["ssm:DescribeParameters"], resources=["*"]))
    self.api.task_definition.add_to_task_role_policy(
        PolicyStatement(
            actions=["ssm:GetParameters*"],
            resources=[
                f"arn:aws:ssm:{self.region}:{self.account}:parameter/schema-cms-app/*"
            ],
        ))