def test_internet_gateway(stack: Stack) -> None:
    """Test InternetGateway construct."""
    # A /16 VPC with DNS support enabled, used as the parent of the subnets
    vpc = ec2.VPC(
        name_to_id("vpc-test"),
        CidrBlock="10.0.0.0/16",
        EnableDnsHostnames="true",
        EnableDnsSupport="true",
    )
    # NOTE(review): the zip yields (zone, ip) pairs but `ip` is never used
    # and `zone` only feeds the logical id — both subnets end up with the
    # same CidrBlock ("10.0.0.0/20") and the same AvailabilityZone
    # ("eu-west-1b"). Presumably CidrBlock=ip and AvailabilityZone=zone were
    # intended; confirm against EXPECTED_TEMPLATE before fixing, as the fix
    # changes the generated template.
    subnets = [
        ec2.Subnet(
            name_to_id(f"{zone}-subnet"),
            CidrBlock="10.0.0.0/20",
            AvailabilityZone="eu-west-1b",
            VpcId=Ref(vpc),
            MapPublicIpOnLaunch="true",
        )
        for zone, ip in zip(
            ["eu-west-1a", "eu-west-1b"], ["10.0.0.0/20", "10.0.16.0/20"]
        )
    ]
    igw = InternetGateway(name_prefix="test", vpc=vpc, subnets=subnets)
    # Register every resource in the stack, then compare the rendered
    # template with the expected one
    for el in (vpc, *subnets, igw):
        stack.add(el)
    assert stack.export()["Resources"] == EXPECTED_TEMPLATE
def resources(self, stack: Stack) -> list[AWSObject]:
    """Return resources associated with the construct."""
    igw = ec2.InternetGateway(name_to_id(f"{self.name_prefix}-igw"))
    gateway_attachment = ec2.VPCGatewayAttachment(
        name_to_id(f"{self.name_prefix}-igw-attachement"),
        InternetGatewayId=Ref(igw),
        VpcId=Ref(self.vpc),
    )
    # Default route sending all outbound traffic through the IGW
    default_route = ec2.Route(
        name_to_id(f"{self.name_prefix}-igw-route"),
        RouteTableId=Ref(self.route_table),
        DestinationCidrBlock="0.0.0.0/0",
        GatewayId=Ref(igw),
    )
    aws_objects: list[AWSObject] = [igw, gateway_attachment, default_route]

    # When the construct created its own route table, it must also be added
    # to the stack and associated with each provided subnet
    if self.add_route_table_to_stack:
        aws_objects.append(self.route_table)
        assert self.subnets is not None
        for num, subnet in enumerate(self.subnets):
            aws_objects.append(
                ec2.SubnetRouteTableAssociation(
                    name_to_id(f"{self.name_prefix}-{num}"),
                    RouteTableId=Ref(self.route_table),
                    SubnetId=Ref(subnet),
                )
            )
    return aws_objects
def resources(self, stack: Stack) -> list[AWSObject]:
    """Return resources associated with the construct."""
    igw = ec2.InternetGateway(name_to_id(f"{self.name_prefix}-igw"))
    gateway_attachment = ec2.VPCGatewayAttachment(
        name_to_id(f"{self.name_prefix}-igw-attachement"),
        InternetGatewayId=Ref(igw),
        VpcId=Ref(self.vpc),
    )
    # Route table dedicated to traffic going through the IGW
    route_table = ec2.RouteTable(
        name_to_id(f"{self.name_prefix}-igw-route-table"), VpcId=Ref(self.vpc)
    )
    default_route = ec2.Route(
        name_to_id(f"{self.name_prefix}-igw-route"),
        RouteTableId=Ref(route_table),
        DestinationCidrBlock="0.0.0.0/0",
        GatewayId=Ref(igw),
    )
    result: list[AWSObject] = [igw, gateway_attachment, route_table, default_route]
    # Associate the route table with every subnet of the construct
    for idx, subnet in enumerate(self.subnets):
        result.append(
            ec2.SubnetRouteTableAssociation(
                name_to_id(f"{self.name_prefix}-{idx}"),
                RouteTableId=Ref(route_table),
                SubnetId=Ref(subnet),
            )
        )
    return result
def resources(self) -> List[AWSObject]:
    """Construct and return a s3.Bucket and its associated s3.BucketPolicy."""
    versioning_status = "Enabled" if self.enable_versioning else "Suspended"
    bucket_id = name_to_id(self.name)

    bucket = s3.Bucket(
        bucket_id,
        BucketName=self.name,
        AccessControl=self.access_control,
        BucketEncryption=s3.BucketEncryption.from_dict(
            "DefautBucketEncryption", self.bucket_encryption
        ),
        PublicAccessBlockConfiguration=self.public_access_block_configuration,
        VersioningConfiguration=s3.VersioningConfiguration(
            Status=versioning_status
        ),
    )
    # The policy explicitly depends on the bucket so it is created after it
    policy = s3.BucketPolicy(
        bucket_id + "Policy",
        Bucket=self.name,
        PolicyDocument=self.policy_document.as_dict,
        DependsOn=bucket_id,
    )
    return [bucket, policy]
def targets(self) -> list[events.Target]:
    """Return rule's targets."""
    rule_targets = []
    # One target per scheduled task, all running on the same ECS cluster
    for task_name in self.task_names:
        rule_targets.append(
            events.Target(
                Arn=GetAtt(name_to_id(self.ecs_cluster.name), "Arn"),
                RoleArn=GetAtt(self.ecs_cluster.ecs_events_role.name, "Arn"),
                EcsParameters=self.ecs_parameters(task_name),
                Id=name_to_id(f"{task_name}-target"),
            )
        )
    return rule_targets
def resources(self, stack: Stack) -> list[AWSObject]:
    """Return the resources implementing the HTTP API."""
    # API logical id, shared by all related resources
    logical_id = name_to_id(self.name)

    result = []

    # Log group receiving the API access logs
    result.append(logs.LogGroup(logical_id + "LogGroup", LogGroupName=self.name))

    # The HTTP API itself
    result.append(
        apigatewayv2.Api(
            logical_id,
            Description=self.description,
            ProtocolType="HTTP",
            Name=self.name,
            DisableExecuteApiEndpoint=self.disable_execute_api_endpoint,
        )
    )

    # Default stage, logging into the log group declared above
    result.append(
        self.declare_stage(
            stage_name="$default",
            log_arn=GetAtt(logical_id + "LogGroup", "Arn"),
        )
    )

    # Single AWS_PROXY integration towards the lambda
    result.append(
        apigatewayv2.Integration(
            logical_id + "Integration",
            ApiId=Ref(logical_id),
            IntegrationType="AWS_PROXY",
            IntegrationUri=self.lambda_arn,
            PayloadFormatVersion="2.0",
        )
    )

    # Routes, all pointing at the integration above
    for route in self.route_list:
        result.extend(
            self.declare_route(
                route=route, integration=Ref(logical_id + "Integration")
            )
        )

    # Optional custom domain; a hosted zone id is required to create it
    if self.domain_name is not None:
        assert self.hosted_zone_id is not None
        result.extend(
            self.declare_domain(
                domain_name=self.domain_name,
                hosted_zone_id=self.hosted_zone_id,
                stage_name="$default",
            )
        )

    # Authorizers
    for auth_name, auth_params in self.authorizers.items():
        result.append(apigatewayv2.Authorizer(name_to_id(auth_name), **auth_params))

    return result
def resources(self, stack: Stack) -> list[AWSObject]:
    """Return list of AWSObject associated with the construct."""
    # Grant the cloudfront distribution read access to the bucket
    self.add_oai_access_to_bucket()

    result: list[AWSObject] = list(self.bucket.resources(stack))
    result.append(self.cache_policy)
    result.append(self.distribution)
    result.append(self.origin_access_identity)

    # Lambda invalidating the cloudfront cache when bucket objects change
    result += self.add_cache_invalidation(stack)

    # Optional route53 alias records pointing at the distribution
    if self.r53_route_from:
        for zone_id, domain in self.r53_route_from:
            alias = route53.AliasTarget(
                DNSName=self.domain_name,
                # Z2FDTNDATAQYW2 is always the hosted zone ID when you
                # create an alias record that routes traffic to a
                # CloudFront distribution
                HostedZoneId="Z2FDTNDATAQYW2",
            )
            result.append(
                route53.RecordSetType(
                    name_to_id(f"{self.name}-{domain}-r53-rset"),
                    AliasTarget=alias,
                    Name=domain,
                    HostedZoneId=zone_id,
                    Type="A",
                )
            )
    return result
def cache_policy(self) -> cloudfront.CachePolicy:
    """Return cloudfront distribution cache policy."""
    if self._cache_policy is None:
        # Nothing from the request (cookies, headers, query strings) is part
        # of the cache key; both brotli and gzip encodings are accepted
        key_params = cloudfront.ParametersInCacheKeyAndForwardedToOrigin(
            CookiesConfig=cloudfront.CacheCookiesConfig(CookieBehavior="none"),
            EnableAcceptEncodingBrotli="true",
            EnableAcceptEncodingGzip="true",
            HeadersConfig=cloudfront.CacheHeadersConfig(HeaderBehavior="none"),
            QueryStringsConfig=cloudfront.CacheQueryStringsConfig(
                QueryStringBehavior="none"
            ),
        )
        policy_config = cloudfront.CachePolicyConfig(
            Comment=f"{self.name} s3 website cloudfront cache policy",
            DefaultTTL=self.default_ttl,
            MaxTTL=31536000,
            MinTTL=1,
            Name="s3-cache-policy",
            ParametersInCacheKeyAndForwardedToOrigin=key_params,
        )
        self._cache_policy = cloudfront.CachePolicy(
            name_to_id(f"{self.name}-cloudfront-cache-policy"),
            CachePolicyConfig=policy_config,
        )
    return self._cache_policy
def subnet(self) -> ec2.Subnet:
    """Return a Subnet for the VPC."""
    subnet_params = {
        "VpcId": Ref(self.vpc),
        "CidrBlock": self.subnet_cidr_block,
    }
    return ec2.Subnet(name_to_id(f"{self.name}Subnet"), **subnet_params)
def s3_route_table_assoc(self) -> ec2.SubnetRouteTableAssociation:
    """Return route table association."""
    assoc_params = {
        "RouteTableId": Ref(self.s3_route_table),
        "SubnetId": Ref(self.subnet),
    }
    return ec2.SubnetRouteTableAssociation(
        name_to_id(f"{self.name}S3RouteTableAssoc"), **assoc_params
    )
def add_lambda_subscription(
    self, function: Function, delivery_policy: Optional[dict] = None
) -> None:
    """Add a lambda subscription endpoint to topic.

    :param function: lambda function that will be added as endpoint
    :param delivery_policy: The delivery policy to assign to the subscription
    """
    # Filter out None values so that an omitted delivery policy does not
    # end up as an explicit DeliveryPolicy=None subscription attribute
    # (the original comprehension copied every entry unconditionally)
    sub_params = {
        key: val
        for key, val in {
            "Endpoint": function.arn,
            "Protocol": "lambda",
            "TopicArn": self.arn,
            "DeliveryPolicy": delivery_policy,
        }.items()
        if val is not None
    }
    self.optional_resources.extend(
        [
            sns.SubscriptionResource(
                name_to_id(f"{function.name}Sub"), **sub_params
            ),
            # Allow SNS to invoke the lambda
            function.invoke_permission(
                name_suffix=self.name, service="sns", source_arn=self.arn
            ),
        ]
    )
def route_table(self):
    """Return route table to which the route to IGW is added."""
    # Lazily create the route table on first access
    if self._route_table is not None:
        return self._route_table
    self._route_table = ec2.RouteTable(
        name_to_id(f"{self.name_prefix}-igw-route-table"),
        VpcId=Ref(self.vpc),
    )
    return self._route_table
def __init__(
    self,
    name: str,
    fifo: bool = False,
    visibility_timeout: int = 30,
    dlq_name: Optional[str] = None,
) -> None:
    """Initialize a SQS.

    :param name: queue name
    :param fifo: if True create a FIFO queue (the queue name is then
        suffixed with ".fifo" as required by SQS)
    :param visibility_timeout: queue visibility timeout in seconds
    :param dlq_name: name of the dead letter queue to redrive messages
        to; no redrive policy is set when None
    """
    self.name = name
    self.attr = {
        "QueueName": name,
        "VisibilityTimeout": visibility_timeout
    }
    if fifo:
        # FIFO queue names must carry the .fifo suffix
        self.attr.update({
            "FifoQueue": True,
            "QueueName": f"{name}.fifo",
            "ContentBasedDeduplication": True,
        })
    if dlq_name:
        # Messages are moved to the dead letter queue after 3 failed receives
        self.attr["RedrivePolicy"] = {
            "deadLetterTargetArn": GetAtt(name_to_id(dlq_name), "Arn"),
            "maxReceiveCount": "3",
        }
def invoke_permission(
    self,
    name_suffix: str,
    service: str,
    source_arn: str,
    source_account: Optional[str] = None,
) -> awslambda.Permission:
    """Create a Lambda Permission object for a given service.

    :param name_suffix: a suffix used in the object name
    :param service: service name (without amazonaws.com domain name)
    :param source_arn: arn of the resource that can access the lambda
    :param source_account: account that holds the resource. This is
        mandatory only when using S3 as a service as a bucket arn is
        not linked to an account.
    :return: an AWSObject
    :raises ValueError: if service is s3 and source_account is not given
    """
    # The previous `assert` is stripped when Python runs with -O; raise an
    # explicit error so the S3 requirement is always enforced
    if service == "s3" and source_account is None:
        raise ValueError("source_account is mandatory when service is s3")

    params = {
        "Action": "lambda:InvokeFunction",
        "FunctionName": self.ref,
        "Principal": f"{service}.amazonaws.com",
        "SourceArn": source_arn,
    }
    if source_account is not None:
        params["SourceAccount"] = source_account

    return awslambda.Permission(name_to_id(self.name + name_suffix), **params)
def create_data_dir(self, root_dir: str) -> None:
    """Create data to be pushed to bucket used by cloudformation for resources."""
    # Staging directory dedicated to this lambda's package
    package_dir = os.path.join(root_dir, name_to_id(self.name), "package")

    # Vendor the python dependencies into the package directory
    if self.requirement_file is not None:
        pip_cmd = python_script("pip") + [
            "install",
            f"--target={package_dir}",
            "-r",
            self.requirement_file,
        ]
        process = Run(pip_cmd, output=None)
        assert process.status == 0

    # Copy user code
    self.populate_package_dir(package_dir=package_dir)

    # Zip the package next to root_dir, then drop the staging directory
    create_archive(
        f"{self.name}_lambda.zip",
        from_dir=package_dir,
        dest=root_dir,
        no_root_dir=True,
    )
    rm(package_dir, recursive=True)
def route_table_assoc(self) -> ec2.SubnetRouteTableAssociation:
    """Return association of route table to this subnet."""
    assoc_params = {
        "RouteTableId": Ref(self.route_table),
        "SubnetId": Ref(self.subnet),
    }
    return ec2.SubnetRouteTableAssociation(
        name_to_id(f"{self.name}RouteTableAssoc"), **assoc_params
    )
def resources(self, stack: Stack) -> list[AWSObject]:
    """Construct and return ECS cluster troposphere resources."""
    cluster_settings = (
        [ecs.ClusterSetting(**cs) for cs in self.cluster_settings]
        if self.cluster_settings
        else None
    )
    provider_strategy = (
        [
            ecs.CapacityProviderStrategyItem(**ps)
            for ps in self.default_capacity_provider_strategy
        ]
        if self.default_capacity_provider_strategy
        else None
    )
    candidates = {
        "ClusterName": self.name,
        "ClusterSettings": cluster_settings,
        "CapacityProviders": self.capacity_providers,
        "DefaultCapacityProviderStrategy": provider_strategy,
        "Tags": Tags({"Name": self.name, **self.tags}),
    }
    # Only forward attributes that are actually configured
    kwargs = {key: val for key, val in candidates.items() if val is not None}
    return [ecs.Cluster(name_to_id(self.name), **kwargs)]
def lambda_resources(self, code_bucket: str, code_key: str) -> list[AWSObject]:
    """Return resource associated with the construct.

    :param code_bucket: bucket in which the lambda code is located
    :param code_key: location of the code in the bucket
    """
    code_params = {"S3Bucket": code_bucket, "S3Key": code_key}
    if self.code_version is not None:
        code_params["S3ObjectVersion"] = str(self.code_version)

    # Role may be given either as a construct or directly as an arn/string
    role = self.role.arn if isinstance(self.role, Role) else self.role

    params = {
        "Code": awslambda.Code(**code_params),
        "Timeout": self.timeout,
        "Description": self.description,
        "Role": role,
        "FunctionName": self.name,
    }
    # Optional attributes are only emitted when configured
    for attr_name, attr_value in (
        ("Runtime", self.runtime),
        ("Handler", self.handler),
        ("MemorySize", self.memory_size),
    ):
        if attr_value is not None:
            params[attr_name] = attr_value

    return [awslambda.Function(name_to_id(self.name), **params)]
def resources(self) -> List[AWSObject]:
    """Build and return objects associated with the configuration recorder.

    Return a configuration recorder and a delivery channel with its s3 bucket
    """
    aws_objects: List[AWSObject] = []

    # Service-linked role used by AWS Config
    config_role = iam.ServiceLinkedRole.from_dict(
        "AWSServiceRoleForConfig", {"AWSServiceName": "config.amazonaws.com"}
    )
    aws_objects.append(config_role)

    # Configuration recorder tracking all supported resource types,
    # including global ones. The Join below builds
    # arn:aws:iam::<account>:role/aws-service-role/... — note the "iam:"
    # element carrying an extra colon to produce the empty region field.
    aws_objects.append(
        config.ConfigurationRecorder(
            name_to_id("ConfigRecorder"),
            Name="ConfigRecorder",
            RecordingGroup=config.RecordingGroup(
                AllSupported=True, IncludeGlobalResourceTypes=True
            ),
            RoleARN=Join(
                ":",
                [
                    "arn",
                    "aws",
                    "iam:",
                    AccountId,
                    ("role/aws-service-role/"
                     "config.amazonaws.com/AWSServiceRoleForConfig"),
                ],
            ),
            DependsOn=config_role.title,
        )
    )

    # Delivery channel and the s3 bucket receiving configuration items
    aws_objects.extend(AWSConfigBucket(name=f"{self.bucket_name}").resources)
    aws_objects.append(
        config.DeliveryChannel(
            name_to_id("DeliveryChannel"),
            Name="DeliveryChannel",
            S3BucketName=f"{self.bucket_name}",
            DependsOn=[name_to_id(f"{self.bucket_name}")],
        )
    )
    return aws_objects
def default_egress_rule(self) -> ec2.SecurityGroupEgress:
    """Return egress that disables default egress Rule."""
    egress_params = {
        "CidrIp": self.cidr_block,
        # -1 stands for all protocols
        "IpProtocol": "-1",
        "GroupId": Ref(self.security_group),
    }
    return ec2.SecurityGroupEgress(
        name_to_id(f"{self.name}DefaultEgress"), **egress_params
    )
def security_group(self) -> ec2.SecurityGroup:
    """Return a security group for ECS tasks."""
    # Start with no inline rules; they are declared separately
    sg_params = {
        "GroupDescription": "Security group for ECS tasks",
        "SecurityGroupEgress": [],
        "SecurityGroupIngress": [],
        "VpcId": Ref(self.vpc),
    }
    return ec2.SecurityGroup(name_to_id(f"{self.name}SecurityGroup"), **sg_params)
def resources(self) -> List[AWSObject]:
    """Return troposphere objects defining the role."""
    role_definition = {
        "RoleName": self.name,
        "Description": self.description,
        "ManagedPolicyArns": self.managed_policy_arns,
        "AssumeRolePolicyDocument": self.assume_role_policy_document.as_dict,
    }
    return [iam.Role.from_dict(name_to_id(self.name), role_definition)]
def nat_gateway(self) -> Optional[ec2.NatGateway]:
    """Return a NAT gateway for this subnet.

    The gateway is created lazily; None is returned when NAT is disabled.
    """
    should_create = self.use_nat and self._nat_gateway is None
    if should_create:
        self._nat_gateway = ec2.NatGateway(
            name_to_id(f"{self.name}-nat"),
            AllocationId=GetAtt(self.nat_eip, "AllocationId"),
            SubnetId=Ref(self.subnet),
        )
    return self._nat_gateway
def declare_route(self, route: Route, integration: Ref | str) -> list[AWSObject]:
    """Declare a route.

    :param route: the route definition
    :param integration: arn of the integration to use for this route
    :return: a list of AWSObjects to be added to the stack
    """
    id_prefix = name_to_id(self.name + route.method + route.route)

    route_params = {
        "ApiId": self.ref,
        "AuthorizationType": route.auth.value,
        "RouteKey": f"{route.method} {route.route}",
        "Target": Sub(
            "integrations/${integration}",
            dict_values={"integration": integration},
        ),
    }
    if route.authorizer_name:
        route_params["AuthorizerId"] = Ref(name_to_id(route.authorizer_name))

    api_route = apigatewayv2.Route(id_prefix + "Route", **route_params)

    # Allow the api gateway to invoke the lambda backing this route
    invoke_permission = awslambda.Permission(
        id_prefix + "LambdaPermission",
        Action="lambda:InvokeFunction",
        FunctionName=self.lambda_arn,
        Principal="apigateway.amazonaws.com",
        SourceArn=Sub(
            "arn:aws:execute-api:${AWS::Region}:${AWS::AccountId}:"
            "${api}/$default/${route_arn}",
            dict_values={
                "api": self.ref,
                "route_arn": f"{route.method}{route.route}",
            },
        ),
    )
    return [api_route, invoke_permission]
def lambda_resources(
    self,
    code_bucket: Optional[str] = None,
    code_key: Optional[str] = None,
    image_uri: Optional[str] = None,
) -> list[AWSObject]:
    """Return resource associated with the construct.

    :param code_bucket: bucket in which the lambda code is located
    :param code_key: location of the code in the bucket
    :param image_uri: URI of a container image in the Amazon ECR registry
    :raises ValueError: when no code source is available
    """
    params: dict[str, Any] = {}

    # Code source priority: S3 location, then inline zipfile, then image
    if code_bucket is not None and code_key is not None:
        code_params = {"S3Bucket": code_bucket, "S3Key": code_key}
        if self.code_version is not None:
            code_params["S3ObjectVersion"] = str(self.code_version)
    elif self.code_zipfile is not None:
        code_params = {"ZipFile": self.code_zipfile}
    elif image_uri:
        code_params = {"ImageUri": image_uri}
        params["PackageType"] = "Image"
    else:
        # Previously code_params was left unbound here, causing a confusing
        # UnboundLocalError; fail with an explicit message instead
        raise ValueError(
            "no lambda code source: provide code_bucket/code_key, "
            "a code zipfile or an image_uri"
        )

    # Role may be given either as a construct or directly as an arn/string
    if isinstance(self.role, Role):
        role = self.role.arn
    else:
        role = self.role

    params.update(
        {
            "Code": awslambda.Code(**code_params),
            "Timeout": self.timeout,
            "Description": self.description,
            "Role": role,
            "FunctionName": self.name,
        }
    )
    # Optional attributes are only emitted when configured
    if self.runtime is not None:
        params["Runtime"] = self.runtime
    if self.handler is not None:
        params["Handler"] = self.handler
    if self.memory_size is not None:
        params["MemorySize"] = self.memory_size
    if self.ephemeral_storage_size is not None:
        params["EphemeralStorage"] = awslambda.EphemeralStorage(
            Size=self.ephemeral_storage_size
        )
    return [awslambda.Function(name_to_id(self.name), **params)]
def declare_stage(self, stage_name: str,
                  log_arn: str | GetAtt) -> apigatewayv2.Stage:
    """Declare an API gateway stage.

    :param stage_name: name of the stage
    :param log_arn: arn of the cloudwatch log group in which api calls
        should be logged
    :return: the AWSObject corresponding to the Stage
    """
    logical_id = name_to_id(self.name)

    # JSON access-log format built from apigateway context variables
    log_format = {
        "source_ip": "$context.identity.sourceIp",
        "request_time": "$context.requestTime",
        "method": "$context.httpMethod",
        "route": "$context.routeKey",
        "protocol": "$context.protocol",
        "status": "$context.status",
        "response_length": "$context.responseLength",
        "request_id": "$context.requestId",
        "integration_error_msg": "$context.integrationErrorMessage",
    }
    # Use the log_arn parameter; it was previously ignored and the
    # destination arn recomputed from the api logical id
    access_log_settings = apigatewayv2.AccessLogSettings(
        DestinationArn=log_arn,
        Format=json.dumps(log_format),
    )
    route_settings = apigatewayv2.RouteSettings(
        DetailedMetricsEnabled=True,
        ThrottlingBurstLimit=self.burst_limit,
        ThrottlingRateLimit=self.rate_limit,
    )
    return apigatewayv2.Stage(
        logical_id + name_to_id(stage_name) + "Stage",
        AccessLogSettings=access_log_settings,
        ApiId=Ref(logical_id),
        AutoDeploy=True,
        Description=f"stage {stage_name}",
        DefaultRouteSettings=route_settings,
        StageName=stage_name,
    )
def rule(self) -> events.Rule:
    """Return the rule scheduling the fargate task."""
    rule_params = {
        "Description": self.description,
        "Name": self.name,
        "ScheduleExpression": self.schedule_expression,
        "State": self.state,
        "Targets": self.targets,
    }
    return events.Rule(name_to_id(self.name), **rule_params)
def ingress_rule(self) -> ec2.SecurityGroupIngress:
    """Return Ingress rule allowing traffic from aws VPC endpoints."""
    ingress_params = {
        "CidrIp": self.cidr_block,
        # HTTPS only
        "FromPort": "443",
        "ToPort": "443",
        "IpProtocol": "tcp",
        "GroupId": Ref(self.security_group),
    }
    return ec2.SecurityGroupIngress(
        name_to_id(f"{self.name}Ingress"), **ingress_params
    )
def subnet(self) -> ec2.Subnet:
    """Return a private subnet."""
    # Lazily create the subnet on first access
    if self._subnet is not None:
        return self._subnet
    self._subnet = ec2.Subnet(
        name_to_id(self.name),
        VpcId=Ref(self.vpc),
        CidrBlock=self.cidr_block,
        Tags=Tags({"Name": self.name}),
    )
    return self._subnet
def s3_egress_rule(self) -> ec2.SecurityGroupEgress:
    """Return security group egress rule allowing S3 traffic."""
    # NOTE(review): pl-6da54004 looks like a region-specific managed prefix
    # list id for S3 — confirm it matches the deployment region
    egress_params = {
        "DestinationPrefixListId": "pl-6da54004",
        "FromPort": "443",
        "ToPort": "443",
        "IpProtocol": "tcp",
        "GroupId": Ref(self.security_group),
    }
    return ec2.SecurityGroupEgress(
        name_to_id(f"{self.name}S3Egress"), **egress_params
    )