def create_redirect_oai_config(self):
    # Workaround for known bug with CloudFront and S3 redirects
    # https://github.com/aws/aws-cdk/issues/5700
    return cloudfront.CustomOriginConfig(
        domain_name=self.bucket_redirect.bucket_website_domain_name,
        origin_protocol_policy=cloudfront.OriginProtocolPolicy.HTTP_ONLY,
    )
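# A minimal usage sketch for the workaround above, assuming this construct also
# exposes `self.certificate` and `self._site_domain_name` (as the next snippet
# does); the names "redirect_distribution" and the www-prefixed alias are
# illustrative, not part of the original stack.
def create_redirect_distribution(self):
    return cloudfront.CloudFrontWebDistribution(
        self, "redirect_distribution",
        viewer_certificate=cloudfront.ViewerCertificate.from_acm_certificate(
            self.certificate,
            aliases=["www.{}".format(self._site_domain_name)],
        ),
        origin_configs=[
            cloudfront.SourceConfiguration(
                # The website-endpoint custom origin stands in for an S3 origin,
                # so redirect rules configured on the bucket keep working.
                custom_origin_source=self.create_redirect_oai_config(),
                behaviors=[cloudfront.Behavior(is_default_behavior=True)],
            )
        ],
    )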
def _create_cloudfront_distribution(self):
    """Create a CloudFront distribution with a public bucket as the origin."""
    origin_source = cloudfront.CustomOriginConfig(
        domain_name=self.bucket.bucket_website_domain_name,
        origin_protocol_policy=cloudfront.OriginProtocolPolicy.HTTP_ONLY,
        origin_headers={"Referer": self.__origin_referer_header},
    )
    self.distribution = cloudfront.CloudFrontWebDistribution(
        self,
        "cloudfront_distribution",
        viewer_certificate=cloudfront.ViewerCertificate.from_acm_certificate(
            self.certificate,
            aliases=[self._site_domain_name],
            security_policy=cloudfront.SecurityPolicyProtocol.TLS_V1_2_2019,
            ssl_method=cloudfront.SSLMethod.SNI,
        ),
        origin_configs=[
            cloudfront.SourceConfiguration(
                custom_origin_source=origin_source,
                behaviors=[
                    cloudfront.Behavior(is_default_behavior=True),
                ],
            )
        ],
        viewer_protocol_policy=cloudfront.ViewerProtocolPolicy.REDIRECT_TO_HTTPS,
        price_class=cloudfront.PriceClass.PRICE_CLASS_ALL,
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    table = ddb.Table(
        self, 'WeatherData',
        partition_key={'name': 'date_part', 'type': ddb.AttributeType.NUMBER},
        sort_key={'name': 'time_part', 'type': ddb.AttributeType.NUMBER},
        read_capacity=1,
        write_capacity=1)

    main_lambda = _lambda.Function(
        self, 'MainHandler',
        runtime=_lambda.Runtime.PYTHON_3_8,
        code=_lambda.Code.asset('lambda'),
        handler='main.handler',
        environment={'WEATHERDATA_TABLE_NAME': table.table_name},
        log_retention=logs.RetentionDays.TWO_WEEKS)
    table.grant_read_write_data(main_lambda)

    api = apigw.LambdaRestApi(self, 'MainEndpoint', handler=main_lambda)
    api.add_usage_plan(
        'UsagePlan',
        throttle=apigw.ThrottleSettings(rate_limit=10, burst_limit=10))

    cloud_front = cf.CloudFrontWebDistribution(
        self, 'Https2HttpDistribution',
        viewer_protocol_policy=cf.ViewerProtocolPolicy.ALLOW_ALL,
        geo_restriction=cf.GeoRestriction.whitelist('US'),
        origin_configs=[
            cf.SourceConfiguration(
                custom_origin_source=cf.CustomOriginConfig(
                    # Extract the bare API domain. Note str.lstrip("https://")
                    # would be a bug: it strips any of the characters
                    # 'h t p s : /' from the left, not the literal prefix.
                    domain_name=api.url.split('//')[1].split('/')[0],
                    origin_protocol_policy=cf.OriginProtocolPolicy.HTTPS_ONLY,
                ),
                origin_path='/prod',
                behaviors=[
                    cf.Behavior(
                        is_default_behavior=True,
                        allowed_methods=cf.CloudFrontAllowedMethods.ALL,
                        cached_methods=cf.CloudFrontAllowedCachedMethods.GET_HEAD,
                        compress=True,
                        forwarded_values=cf.CfnDistribution.ForwardedValuesProperty(
                            query_string=True,
                        )),
                ],
            )
        ])

    core.CfnOutput(
        self, 'HttpEndpointDomain',
        value=f'http://{cloud_front.domain_name}',
        description='CloudFront domain name that accepts requests over both HTTP and HTTPS.',
        export_name='HTTP-Endpoint')
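# Note: api.url is an unresolved CDK token at synth time, so plain Python string
# slicing is fragile. A token-safe alternative (the same pattern used with the
# AppSync GraphQL URL in a later snippet) is to let CloudFormation do the split:
#
#     domain_name=core.Fn.select(2, core.Fn.split('/', api.url))
#
# 'https://abc123.execute-api.us-east-1.amazonaws.com/prod/' splits on '/' into
# ['https:', '', 'abc123.execute-api.us-east-1.amazonaws.com', 'prod', ''],
# and element 2 is the bare domain.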
def __init__(self, scope: core.Construct, id: str, hostedzoneid: str,
             hostedzonename: str, origin_name: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    domain_name = "{}.{}".format("test", hostedzonename)
    hostedzone = dns.HostedZone.from_hosted_zone_attributes(
        self, "hosted_zone",
        hosted_zone_id=hostedzoneid,
        zone_name=hostedzonename,
    )
    # Certificates used by CloudFront must be issued in us-east-1.
    acm_certificate = acm.DnsValidatedCertificate(
        self, "ACMCertGenerator",
        hosted_zone=hostedzone,
        region="us-east-1",
        domain_name=domain_name,  # was hardcoded to "test.awsels.com", leaving the variable unused
        validation_method=acm.ValidationMethod.DNS,
    )
    source_configuration = cloudfront.SourceConfiguration(
        custom_origin_source=cloudfront.CustomOriginConfig(
            domain_name=origin_name,
            allowed_origin_ssl_versions=[cloudfront.OriginSslPolicy.TLS_V1_2],
            http_port=80,
            https_port=443,
            origin_protocol_policy=cloudfront.OriginProtocolPolicy.HTTPS_ONLY,
        ),
        behaviors=[cloudfront.Behavior(
            compress=False,
            allowed_methods=cloudfront.CloudFrontAllowedMethods.ALL,
            is_default_behavior=True,
            cached_methods=cloudfront.CloudFrontAllowedCachedMethods.GET_HEAD,
        )],
    )
    viewer_configuration = cloudfront.ViewerCertificate.from_acm_certificate(
        certificate=acm.Certificate.from_certificate_arn(
            self, "certificate",
            certificate_arn=acm_certificate.certificate_arn),
        aliases=[origin_name],
        security_policy=cloudfront.SecurityPolicyProtocol.TLS_V1,
        ssl_method=cloudfront.SSLMethod.SNI,
    )
    distribution = cloudfront.CloudFrontWebDistribution(
        self, 'Distribution',
        origin_configs=[source_configuration],
        viewer_certificate=viewer_configuration,
        price_class=cloudfront.PriceClass.PRICE_CLASS_100,
    )
def __init__(self, scope: core.Construct, id: str, vpc, alb, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    self.website_bucket = _s3.Bucket(
        self, "websiteBucket",
        website_index_document="index.html",
        website_error_document="error.html",
        removal_policy=core.RemovalPolicy.DESTROY)
    self.static_bucket = _s3.Bucket(
        self, "staticBucket",
        removal_policy=core.RemovalPolicy.DESTROY)

    # Website
    web_behavior = _cf.Behavior(
        is_default_behavior=True,
        default_ttl=core.Duration.minutes(0))
    web_source_config = _cf.SourceConfiguration(
        behaviors=[web_behavior],
        s3_origin_source=_cf.S3OriginConfig(
            s3_bucket_source=self.website_bucket))

    # Static Content
    static_behavior = _cf.Behavior(
        path_pattern="images/*",
        default_ttl=core.Duration.minutes(0))
    static_source_config = _cf.SourceConfiguration(
        behaviors=[static_behavior],
        s3_origin_source=_cf.S3OriginConfig(
            s3_bucket_source=self.static_bucket))

    # ALB
    alb_behavior = _cf.Behavior(
        path_pattern="api/*",
        allowed_methods=_cf.CloudFrontAllowedMethods.ALL,
        default_ttl=core.Duration.minutes(0))
    alb_source_config = _cf.SourceConfiguration(
        behaviors=[alb_behavior],
        custom_origin_source=_cf.CustomOriginConfig(
            domain_name=alb.load_balancer_dns_name,
            origin_protocol_policy=_cf.OriginProtocolPolicy.HTTP_ONLY))

    self.cf = _cf.CloudFrontWebDistribution(
        self, "cfDistribution",
        price_class=_cf.PriceClass.PRICE_CLASS_100,
        origin_configs=[
            web_source_config, static_source_config, alb_source_config
        ])
def __init__(self, scope: core.Construct, id: str, api: apigateway.RestApi, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    stack = core.Stack.of(self)

    bucket = s3.Bucket(self, 'Storage')
    s3_deployment.BucketDeployment(
        self, 'Deployment',
        sources=[
            s3_deployment.Source.asset('./src/html'),
        ],
        destination_bucket=bucket,
    )

    origin_identity = cloudfront.OriginAccessIdentity(self, 'Identity')
    bucket.grant_read(origin_identity.grant_principal)

    s3_origin = cloudfront.SourceConfiguration(
        s3_origin_source=cloudfront.S3OriginConfig(
            s3_bucket_source=bucket,
            origin_access_identity=origin_identity,
        ),
        behaviors=[
            cloudfront.Behavior(
                default_ttl=core.Duration.days(1),
                min_ttl=core.Duration.days(1),
                max_ttl=core.Duration.days(31),
                is_default_behavior=True,
            )
        ])
    api_origin = cloudfront.SourceConfiguration(
        origin_path='/{}'.format(api.deployment_stage.stage_name),
        custom_origin_source=cloudfront.CustomOriginConfig(
            domain_name='{}.execute-api.{}.{}'.format(
                api.rest_api_id, stack.region, stack.url_suffix),
        ),
        behaviors=[
            cloudfront.Behavior(
                default_ttl=core.Duration.seconds(0),
                min_ttl=core.Duration.seconds(0),
                max_ttl=core.Duration.seconds(0),
                path_pattern='/stock/*',
                forwarded_values={
                    'query_string': True,
                    'query_string_cache_keys': ['start', 'end'],
                })
        ])

    domain_name = 'demo.training'
    subdomain = 'finance.{}'.format(domain_name)
    zone = route53.HostedZone.from_lookup(
        self, 'Zone',
        domain_name=domain_name,
    )
    certificate = acm.DnsValidatedCertificate(
        self, 'Certificate',
        domain_name=subdomain,
        hosted_zone=zone,
        region='us-east-1',
    )
    distribution = cloudfront.CloudFrontWebDistribution(
        self, 'CDN',
        price_class=cloudfront.PriceClass.PRICE_CLASS_ALL,
        origin_configs=[
            s3_origin,
            api_origin,
        ],
        alias_configuration=cloudfront.AliasConfiguration(
            acm_cert_ref=certificate.certificate_arn,
            names=[subdomain],
        ))
    route53.ARecord(
        self, 'DnsRecord',
        record_name=subdomain,
        target=route53.AddressRecordTarget.from_alias(
            alias_target=route53_targets.CloudFrontTarget(distribution)),
        zone=zone,
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # Message timeout; used by SQS and Lambda
    message_timeout = core.Duration.seconds(15)

    # SQS queue that the Raspberry Pi will write to
    queue = sqs.Queue(
        self, 'Queue',
        visibility_timeout=message_timeout,
        receive_message_wait_time=core.Duration.seconds(20),
        retention_period=core.Duration.hours(1),
    )

    # DynamoDB table that the web app will read from
    icao_address = dynamodb.Attribute(
        name='IcaoAddress',
        type=dynamodb.AttributeType.STRING,
    )
    table = dynamodb.Table(
        self, 'Table',
        partition_key=icao_address,
        billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST,
        removal_policy=core.RemovalPolicy.DESTROY,
    )

    database = timestream.CfnDatabase(
        self, 'Database',
        database_name='aircraft-database',
    )
    table2 = timestream.CfnTable(
        self, 'Table2',
        database_name=database.ref,
        table_name='aircraft-table',
        retention_properties={
            'MemoryStoreRetentionPeriodInHours': 1,
            'MagneticStoreRetentionPeriodInDays': 1,
        })

    # IAM user for the Raspberry Pi
    user = iam.User(self, 'RaspberryPi')
    queue.grant_send_messages(user)
    access_key = iam.CfnAccessKey(
        self, 'AccessKey',
        user_name=user.user_name,
    )

    # IAM role for Lambda function, so it can write to DynamoDB
    lambda_role = iam.Role(
        self, 'LambdaRole',
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AWSLambdaBasicExecutionRole'),
        ],
    )
    lambda_role.add_to_policy(
        iam.PolicyStatement(
            actions=[
                'timestream:CancelQuery',
                'timestream:DescribeEndpoints',
                'timestream:DescribeTable',
                'timestream:ListMeasures',
                'timestream:Select',
                'timestream:WriteRecords',
            ],
            resources=['*'],  # TODO: narrow down permissions
        ))
    table.grant_read_write_data(lambda_role)

    # Integration between SQS and Lambda
    event = lambda_event_sources.SqsEventSource(
        queue=queue,
        batch_size=10,
    )

    # Lambda function that processes messages from SQS queue and updates DynamoDB table
    import_function = lambda_.Function(
        self, 'ImportFunction',
        description='Reads SQS messages and writes to DynamoDB',
        runtime=lambda_.Runtime.PYTHON_3_8,
        code=lambda_.Code.from_asset('lambda_import/'),
        timeout=message_timeout,
        handler='index.handler',
        role=lambda_role,
        events=[event],
        environment={
            'TABLE_NAME': table2.ref,
        },
    )
    # TODO: add custom log group
    # TODO: add metric filters for number of successful updates and failed updates

    # Lambda function that reads from DynamoDB and returns data to API Gateway
    api_function = lambda_.Function(
        self, 'ApiFunction',
        description='Reads from DynamoDB and returns to API GW',
        runtime=lambda_.Runtime.PYTHON_3_8,
        code=lambda_.Code.from_asset('lambda_api/'),
        timeout=message_timeout,
        handler='index.handler',
        role=lambda_role,
        environment={
            'TABLE_NAME': table.table_name,
        },
    )

    # API Gateway for requesting aircraft data
    api = apigateway.RestApi(
        self, 'Api',
        endpoint_types=[apigateway.EndpointType.REGIONAL],
        cloud_watch_role=False,
    )
    aircraft_resource = api.root.add_resource('aircraft')
    aircraft_resource.add_method(
        http_method='GET',
        integration=apigateway.LambdaIntegration(
            api_function,
            proxy=True,
        ),
    )

    # Static website
    bucket = s3.Bucket(self, 'StaticWebsite')
    s3_deployment.BucketDeployment(
        self, 'Deployment',
        sources=[
            s3_deployment.Source.asset('html/'),
        ],
        destination_bucket=bucket,
    )

    # Permissions between CloudFront and S3
    origin_identity = cloudfront.OriginAccessIdentity(self, 'Identity')
    bucket.grant_read(origin_identity.grant_principal)

    # CloudFront distribution pointing to both S3 and API Gateway
    s3_origin = cloudfront.SourceConfiguration(
        s3_origin_source=cloudfront.S3OriginConfig(
            s3_bucket_source=bucket,
            origin_access_identity=origin_identity,
        ),
        behaviors=[
            cloudfront.Behavior(
                default_ttl=core.Duration.days(0),
                min_ttl=core.Duration.days(0),
                max_ttl=core.Duration.days(31),
                is_default_behavior=True,
            )
        ])
    api_origin = cloudfront.SourceConfiguration(
        origin_path='/{}'.format(api.deployment_stage.stage_name),
        custom_origin_source=cloudfront.CustomOriginConfig(
            domain_name='{}.execute-api.{}.{}'.format(
                api.rest_api_id, self.region, self.url_suffix),
        ),
        behaviors=[
            cloudfront.Behavior(
                default_ttl=core.Duration.seconds(0),
                min_ttl=core.Duration.seconds(0),
                max_ttl=core.Duration.seconds(0),
                path_pattern='/aircraft/*',
            )
        ])

    domain_name = self.node.try_get_context('domain_name')

    # If domain name is specified, create a certificate and alias configuration for CloudFront
    if domain_name is None:
        alias_configuration = None
    else:
        subdomain = 'aircraft.{}'.format(domain_name)
        zone = route53.HostedZone.from_lookup(
            self, 'Zone',
            domain_name=domain_name,
        )
        certificate = acm.DnsValidatedCertificate(
            self, 'Certificate',
            domain_name=subdomain,
            hosted_zone=zone,
            region='us-east-1',
        )
        alias_configuration = cloudfront.AliasConfiguration(
            acm_cert_ref=certificate.certificate_arn,
            names=[subdomain],
        )

    distribution = cloudfront.CloudFrontWebDistribution(
        self, 'CDN',
        price_class=cloudfront.PriceClass.PRICE_CLASS_ALL,
        alias_configuration=alias_configuration,
        origin_configs=[
            s3_origin,
            api_origin,
        ],
    )

    # If domain name is specified, create a DNS record for CloudFront
    if domain_name is not None:
        route53.ARecord(
            self, 'DnsRecord',
            record_name=subdomain,
            target=route53.AddressRecordTarget.from_alias(
                alias_target=route53_targets.CloudFrontTarget(distribution)),
            zone=zone,
        )

    # Outputs that are needed on the Raspberry Pi
    core.CfnOutput(self, 'QueueUrl', value=queue.queue_url)
    core.CfnOutput(self, 'AccessKeyId', value=access_key.ref)
    core.CfnOutput(self, 'SecretAccessKey', value=access_key.attr_secret_access_key)
    core.CfnOutput(self, 'Region', value=self.region)
def __init__(
    self,
    scope: core.Construct,
    id: str,
    **kwargs,
) -> None:
    super().__init__(scope, id, **kwargs)

    s3_domain_prefix = scope.static_site_bucket.bucket_name
    s3_domain_suffix = ".s3-website-us-east-1.amazonaws.com"
    s3_website_domain_name = s3_domain_prefix + s3_domain_suffix
    path_patterns = ["/api/*", "/admin/*", "/flower*"]

    self.distribution = cloudfront.CloudFrontWebDistribution(
        self,
        "CloudFrontDistribution",
        origin_configs=[
            cloudfront.SourceConfiguration(
                custom_origin_source=cloudfront.CustomOriginConfig(
                    domain_name=scope.alb.load_balancer_dns_name,
                    origin_protocol_policy=cloudfront.OriginProtocolPolicy.MATCH_VIEWER,
                ),
                behaviors=[
                    cloudfront.Behavior(
                        allowed_methods=cloudfront.CloudFrontAllowedMethods.ALL,
                        path_pattern=path_pattern,
                        forwarded_values={
                            "headers": ["*"],
                            "cookies": {"forward": "all"},
                            "query_string": True,
                        },
                    ) for path_pattern in path_patterns
                ],
            ),
            cloudfront.SourceConfiguration(
                custom_origin_source=cloudfront.CustomOriginConfig(
                    domain_name=s3_website_domain_name,
                    origin_protocol_policy=cloudfront.OriginProtocolPolicy.HTTP_ONLY,
                ),
                behaviors=[
                    cloudfront.Behavior(
                        is_default_behavior=True,
                        cached_methods=cloudfront.CloudFrontAllowedCachedMethods.GET_HEAD,
                    )
                ],
            ),
            cloudfront.SourceConfiguration(
                s3_origin_source=cloudfront.S3OriginConfig(
                    s3_bucket_source=scope.backend_assets_bucket),
                behaviors=[
                    cloudfront.Behavior(
                        allowed_methods=cloudfront.CloudFrontAllowedMethods.ALL,
                        forwarded_values={"query_string": True},
                        path_pattern=path_pattern,
                        min_ttl=core.Duration.seconds(0),
                        default_ttl=core.Duration.seconds(0),
                        max_ttl=core.Duration.seconds(0),
                    ) for path_pattern in ["/static/*", "/media/*"]
                ],
            ),
        ],
        alias_configuration=cloudfront.AliasConfiguration(
            acm_cert_ref=scope.certificate.certificate_arn,
            names=[scope.full_domain_name],
        ),
    )

    route53.ARecord(
        self,
        "AliasRecord",
        target=route53.AddressRecordTarget.from_alias(
            targets.CloudFrontTarget(self.distribution)),
        zone=scope.hosted_zone,
        record_name=f"{scope.full_domain_name}.",
    )
def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # Validate required props.
    required_props_keys = ['CfOriginDomainName', 'Asg', 'HostedZoneName', 'WebsiteDns']
    for k in required_props_keys:
        if k not in props or not props[k]:
            raise ValueError("Required prop %s is not present" % k)

    # Create a custom resource that returns the IP of the host behind the autoscaling group
    asg = props['Asg']
    asg_ip_handler = lambda_.Function(
        self, 'GhostIpHandler',
        runtime=lambda_.Runtime.PYTHON_3_6,
        code=lambda_.Code.asset('lambda'),
        handler='ghost_ip.handler',
    )
    asg_ip_handler.add_to_role_policy(
        statement=iam.PolicyStatement(
            actions=['autoscaling:DescribeAutoScalingGroups', 'ec2:DescribeInstances'],
            resources=['*'],
        )
    )
    asg_ip_provider = cr.Provider(
        self, 'GhostIpProvider',
        on_event_handler=asg_ip_handler,
    )
    asg_ip_resource = cfn.CustomResource(
        self, 'GhostIpResource',
        provider=asg_ip_provider,
        properties={
            'AsgName': asg.auto_scaling_group_name,
            'ts': time.time(),  # this makes sure the function is invoked for every CFN update
        }
    )

    # Create R53 HZ and cf origin domain
    if 'ExistingHostedZoneId' in props and props['ExistingHostedZoneId']:
        hz = route53.HostedZone.from_hosted_zone_attributes(
            self, 'HostedZone',
            zone_name=props['HostedZoneName'],
            hosted_zone_id=props['ExistingHostedZoneId'],
        )
    else:
        hz = route53.HostedZone(
            self, 'HostedZone',
            zone_name=props['HostedZoneName'],
        )
    origin_rrset = route53.ARecord(
        self, 'OriginRecord',
        target=route53.RecordTarget.from_ip_addresses(
            asg_ip_resource.get_att_string('GhostIp')),
        record_name=props['CfOriginDomainName'],
        zone=hz,
    )

    # Create a CF distro
    acm_cert = acm.DnsValidatedCertificate(
        self, 'GhostAcmCert',
        hosted_zone=hz,
        domain_name=props['WebsiteDns'],
        region='us-east-1',
    )
    cf_distro = cf.CloudFrontWebDistribution(
        self, 'CfDistro',
        origin_configs=[cf.SourceConfiguration(
            custom_origin_source=cf.CustomOriginConfig(
                domain_name=props['CfOriginDomainName'],
                origin_protocol_policy=cf.OriginProtocolPolicy.HTTP_ONLY,
            ),
            behaviors=[cf.Behavior(is_default_behavior=True)],
        )],
        alias_configuration=cf.AliasConfiguration(
            names=[props['WebsiteDns']],
            acm_cert_ref=acm_cert.certificate_arn,
        ),
        default_root_object='',
    )

    # Create the top-level website DNS pointing to the CF distro
    ghost_rrset = route53.CnameRecord(
        self, 'GhostDns',
        domain_name=cf_distro.domain_name,
        zone=hz,
        record_name=props['WebsiteDns'],
    )
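# The lambda/ghost_ip.py handler itself is not shown in this stack; the sketch
# below is a hedged guess at what it could look like. The Provider framework
# treats the returned 'Data' keys as resource attributes, which is what makes
# asg_ip_resource.get_att_string('GhostIp') resolve above.
import boto3

def handler(event, context):
    asg_name = event['ResourceProperties']['AsgName']
    autoscaling = boto3.client('autoscaling')
    ec2 = boto3.client('ec2')
    # Look up the first instance in the ASG and return its public IP, since the
    # record it feeds is used as a CloudFront origin and must be reachable.
    groups = autoscaling.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name])
    instance_id = groups['AutoScalingGroups'][0]['Instances'][0]['InstanceId']
    reservations = ec2.describe_instances(InstanceIds=[instance_id])
    ip = reservations['Reservations'][0]['Instances'][0]['PublicIpAddress']
    return {'Data': {'GhostIp': ip}}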
def __init__(
    self,
    scope: core.Construct,
    id_: str,
    vpc_stack,
    elastic_stack,
    update_lambda_zip=False,
    **kwargs,
) -> None:
    super().__init__(scope, id_, **kwargs)

    # rebuild the lambda zip if requested, or if the zip doesn't exist yet
    if (
        update_lambda_zip
        or not pathlib.Path(os.path.join(dirname, "kibana_lambda.zip")).exists()
    ):
        call(["docker", "build", "--tag", "kibana-lambda", "."], cwd=dirname)
        call(
            ["docker", "create", "-ti", "--name", "dummy", "kibana-lambda", "bash"],
            cwd=dirname,
        )
        call(["docker", "cp", "dummy:/tmp/kibana_lambda.zip", "."], cwd=dirname)
        call(["docker", "rm", "-f", "dummy"], cwd=dirname)

    kibana_bucket = s3.Bucket(
        self,
        "kibana_bucket",
        public_read_access=False,
        block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
        removal_policy=core.RemovalPolicy.DESTROY,
    )
    # tag the bucket
    core.Tag.add(kibana_bucket, "project", constants["PROJECT_TAG"])

    # the lambda behind the api
    kibana_lambda = lambda_.Function(
        self,
        "kibana_lambda",
        description="kibana api gateway lambda",
        code=lambda_.Code.from_asset(os.path.join(dirname, "kibana_lambda.zip")),
        handler="lambda_function.lambda_handler",
        timeout=core.Duration.seconds(300),
        runtime=lambda_.Runtime.PYTHON_3_8,
        vpc=vpc_stack.get_vpc,
        security_groups=[elastic_stack.elastic_security_group],
        log_retention=logs.RetentionDays.ONE_WEEK,
    )
    # tag the lambda
    core.Tag.add(kibana_lambda, "project", constants["PROJECT_TAG"])

    # create policies for the lambda
    kibana_lambda_policy = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        actions=["s3:*"],
        resources=["*"],
    )
    # add the role permissions
    kibana_lambda.add_to_role_policy(statement=kibana_lambda_policy)

    # the api gateway
    kibana_api = apigw.LambdaRestApi(
        self, "kibana_api", handler=kibana_lambda, binary_media_types=["*/*"]
    )
    # tag the api gateway
    core.Tag.add(kibana_api, "project", constants["PROJECT_TAG"])

    kibana_identity = cloudfront.OriginAccessIdentity(self, "kibana_identity")
    kibana_api_domain = "/".join(kibana_api.url.split("/")[1:-2])[1:]
    kibana_api_path = f'/{"/".join(kibana_api.url.split("/")[-2:])}'

    # create the cloudfront distribution
    kibana_distribution = cloudfront.CloudFrontWebDistribution(
        self,
        "kibana_distribution",
        origin_configs=[
            # the lambda source for kibana
            cloudfront.SourceConfiguration(
                custom_origin_source=cloudfront.CustomOriginConfig(
                    domain_name=kibana_api_domain,
                    origin_protocol_policy=cloudfront.OriginProtocolPolicy.HTTPS_ONLY,
                ),
                origin_path="/prod",
                behaviors=[
                    cloudfront.Behavior(
                        is_default_behavior=True,
                        allowed_methods=cloudfront.CloudFrontAllowedMethods.ALL,
                        cached_methods=cloudfront.CloudFrontAllowedCachedMethods.GET_HEAD_OPTIONS,
                        compress=False,
                        forwarded_values=CfnDistribution.ForwardedValuesProperty(
                            query_string=True,
                            cookies=CfnDistribution.CookiesProperty(forward="all"),
                            headers=[
                                "Content-Type",
                                "Accept",
                                "Accept-Encoding",
                                "kbn-name",
                                "kbn-version",
                            ],
                        ),
                    )
                ],
            ),
            # the s3 bucket source for kibana
            cloudfront.SourceConfiguration(
                s3_origin_source=cloudfront.S3OriginConfig(
                    s3_bucket_source=kibana_bucket,
                    origin_access_identity=kibana_identity,
                ),
                behaviors=[
                    cloudfront.Behavior(
                        is_default_behavior=False,
                        path_pattern="bucket_cached/*",
                        allowed_methods=cloudfront.CloudFrontAllowedMethods.GET_HEAD,
                        cached_methods=cloudfront.CloudFrontAllowedCachedMethods.GET_HEAD,
                        compress=True,
                    )
                ],
            ),
        ],
    )
    # tag the cloudfront distribution
    core.Tag.add(kibana_distribution, "project", constants["PROJECT_TAG"])
    # needs api and bucket to be available
    kibana_distribution.node.add_dependency(kibana_api)

    # kibana bucket empty policies
    kibana_bucket_empty_policy = [
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=["s3:ListBucket"],
            resources=["*"],
        ),
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=["s3:DeleteObject"],
            resources=[f"{kibana_bucket.bucket_arn}/*"],
        ),
    ]
    # create the custom resource (CustomResource is this project's own helper construct)
    kibana_bucket_empty = CustomResource(
        self,
        "kibana_bucket_empty",
        PhysicalId="kibanaBucketEmpty",
        Description="Empty kibana cache s3 bucket",
        Uuid="f7d4f730-4ee1-13e8-9c2d-fa7ae06bbebc",
        HandlerPath=os.path.join(dirname, "../helpers/s3_bucket_empty.py"),
        BucketName=kibana_bucket.bucket_name,
        ResourcePolicies=kibana_bucket_empty_policy,
    )
    # tag the lambda
    core.Tag.add(kibana_bucket_empty, "project", constants["PROJECT_TAG"])
    # needs a dependency
    kibana_bucket_empty.node.add_dependency(kibana_bucket)

    # kibana lambda update policies
    kibana_lambda_update_policy = [
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                "s3:ListBucket",
                "s3:ListAllMyBuckets",
                "lambda:ListFunctions",
                "lambda:UpdateFunctionConfiguration",
                "cloudfront:ListDistributions",
                "s3:GetBucketTagging",
                "es:ListDomainNames",
                "es:DescribeElasticsearchDomain",
            ],
            resources=["*"],
        )
    ]
    # create the kibana lambda update
    kibana_lambda_update = CustomResource(
        self,
        "kibana_lambda_update",
        Description="Update ENV vars for kibana api lambda",
        PhysicalId="kibanaLambdaUpdate",
        Uuid="f7d4f230-4ee1-07e8-9c2d-fa7ae06bbebc",
        HandlerPath=os.path.join(dirname, "../helpers/lambda_env_update.py"),
        ResourcePolicies=kibana_lambda_update_policy,
    )
    # tag the lambda
    core.Tag.add(kibana_lambda_update, "project", constants["PROJECT_TAG"])
    # needs a dependency
    kibana_lambda_update.node.add_dependency(kibana_bucket)
    kibana_lambda_update.node.add_dependency(kibana_distribution)

    core.CfnOutput(
        self,
        "kibana_link",
        value=f"https://{kibana_distribution.domain_name}/_plugin/kibana",
        description="Kibana Web Url",
        export_name="kibana-link",
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # store
    dynamodb_table = dynamodb.Table(
        self, 'dynamodb_table',
        table_name=f'{PROJECT}_{STAGE}',
        partition_key=dynamodb.Attribute(
            name='date', type=dynamodb.AttributeType.STRING),
        billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST,
        point_in_time_recovery=False,
        removal_policy=core.RemovalPolicy.DESTROY,
        server_side_encryption=True,
    )

    # public api
    public_api = appsync.CfnGraphQLApi(
        self, 'public_api',
        name=f'{PROJECT}_{STAGE}',
        authentication_type='API_KEY',
    )
    now = time.localtime()
    epoch = time.mktime(now)
    public_api_key = appsync.CfnApiKey(
        self, 'public_api_key',
        api_id=public_api.attr_api_id,
        expires=epoch + core.Duration.days(90).to_seconds(),
    )
    with open('schema.gql', mode='r') as f:
        graphql_schema = f.read()
    appsync.CfnGraphQLSchema(
        self, 'public_api_schema',
        api_id=public_api.attr_api_id,
        definition=graphql_schema)
    public_api_role = iam.Role(
        self, 'public_api_role',
        assumed_by=iam.ServicePrincipal('appsync.amazonaws.com'),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'AmazonDynamoDBFullAccess')
        ],
    )
    public_api_datasource = appsync.CfnDataSource(
        self, 'public_api_datasource',
        api_id=public_api.attr_api_id,
        name=f'{PROJECT}_{STAGE}_dynamodb',
        type='AMAZON_DYNAMODB',
        dynamo_db_config={
            'awsRegion': 'us-east-1',
            'tableName': dynamodb_table.table_name,
        },
        service_role_arn=public_api_role.role_arn,
    )
    with open('mapping_templates/get_holiday.json', mode='r') as f:
        get_holiday_json = f.read()
    appsync.CfnResolver(
        self, 'public_api_resolver_get_holiday',
        api_id=public_api.attr_api_id,
        type_name='Query',
        field_name='getHoliday',
        data_source_name=public_api_datasource.attr_name,
        kind='UNIT',
        request_mapping_template=get_holiday_json,
        response_mapping_template='$util.toJson($context.result)',
    )
    with open('mapping_templates/list_holidays.json', mode='r') as f:
        list_holidays_json = f.read()
    appsync.CfnResolver(
        self, 'public_api_resolver_list_holidays',
        api_id=public_api.attr_api_id,
        type_name='Query',
        field_name='listHolidays',
        data_source_name=public_api_datasource.attr_name,
        kind='UNIT',
        request_mapping_template=list_holidays_json,
        response_mapping_template='$util.toJson($context.result)',
    )

    # lambda source code upload to s3
    lambda_assets = s3_assets.Asset(
        self, 'lambda_assets', path='./function/.artifact/')

    # update function
    func_api = lambda_.Function(
        self, f'{PROJECT}-{STAGE}-func',
        function_name=f'{PROJECT}-{STAGE}-func',
        code=lambda_.Code.from_bucket(
            bucket=lambda_assets.bucket, key=lambda_assets.s3_object_key),
        handler='app.handler',
        runtime=lambda_.Runtime.PYTHON_3_7,
        timeout=core.Duration.seconds(120),
        log_retention=logs.RetentionDays.SIX_MONTHS,
        memory_size=128,
        tracing=lambda_.Tracing.ACTIVE,
    )
    func_api.add_environment('TABLE_NAME', dynamodb_table.table_name)
    func_api.add_environment('CSV_URL', CSV_URL)
    func_api.add_to_role_policy(
        iam.PolicyStatement(
            actions=[
                'dynamodb:Get*',
                'dynamodb:Put*',
                'dynamodb:Batch*',
            ],
            resources=[dynamodb_table.table_arn],
        ))

    # schedule execute
    events.Rule(
        self, f'{PROJECT}-{STAGE}-schedule',
        enabled=True,
        schedule=events.Schedule.rate(core.Duration.days(10)),
        targets=[events_targets.LambdaFunction(func_api)],
    )

    # lambda@edge
    func_lambdaedge = lambda_.Function(
        self, f'{PROJECT}-{STAGE}-func-lambdaedge',
        function_name=f'{PROJECT}-{STAGE}-func-lambdaedge',
        code=lambda_.Code.from_inline(
            open('./function/src/lambdaedge.py').read().replace(
                '__X_API_KEY__', public_api_key.attr_api_key)),
        handler='index.handler',
        runtime=lambda_.Runtime.PYTHON_3_7,
        timeout=core.Duration.seconds(30),
        memory_size=128,
        role=iam.Role(
            self, f'{PROJECT}-{STAGE}-func-lambdaedge-role',
            assumed_by=iam.CompositePrincipal(
                iam.ServicePrincipal('edgelambda.amazonaws.com'),
                iam.ServicePrincipal('lambda.amazonaws.com'),
            ),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    'service-role/AWSLambdaBasicExecutionRole'),
            ],
        ),
    )
    # Publish a new version whenever the source (with the API key substituted) changes.
    lambdaedge_version = func_lambdaedge.add_version(
        hashlib.sha256(
            open('./function/src/lambdaedge.py').read().replace(
                '__X_API_KEY__', public_api_key.attr_api_key).encode()).hexdigest())

    # ACM
    certificates = acm.Certificate(
        self, 'certificates',
        domain_name=DOMAIN,
        validation_method=acm.ValidationMethod.DNS,
    )

    # CDN
    cdn = cloudfront.CloudFrontWebDistribution(
        self, f'{PROJECT}-{STAGE}-cloudfront',
        origin_configs=[
            cloudfront.SourceConfiguration(
                behaviors=[
                    # default behavior
                    cloudfront.Behavior(
                        allowed_methods=cloudfront.CloudFrontAllowedMethods.ALL,
                        default_ttl=core.Duration.seconds(0),
                        max_ttl=core.Duration.seconds(0),
                        min_ttl=core.Duration.seconds(0),
                        is_default_behavior=True,
                        lambda_function_associations=[
                            cloudfront.LambdaFunctionAssociation(
                                event_type=cloudfront.LambdaEdgeEventType.ORIGIN_REQUEST,
                                lambda_function=lambdaedge_version,
                            ),
                        ])
                ],
                custom_origin_source=cloudfront.CustomOriginConfig(
                    # Extract the bare AppSync domain from the GraphQL URL.
                    domain_name=core.Fn.select(
                        2, core.Fn.split('/', public_api.attr_graph_ql_url)),
                ),
            )
        ],
        alias_configuration=cloudfront.AliasConfiguration(
            acm_cert_ref=certificates.certificate_arn,
            names=[DOMAIN],
            security_policy=cloudfront.SecurityPolicyProtocol.TLS_V1_2_2018,
        ),
        price_class=cloudfront.PriceClass.PRICE_CLASS_ALL,
    )
    core.CfnOutput(
        self, 'cloudfront-domain',
        value=cdn.domain_name,
    )
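# The inlined ./function/src/lambdaedge.py is not shown in this stack. Given the
# __X_API_KEY__ substitution and the ORIGIN_REQUEST association above, a plausible
# sketch is an origin-request handler that attaches the AppSync API key before the
# request reaches the GraphQL origin; treat it as an assumption, not the original:
def handler(event, context):
    request = event['Records'][0]['cf']['request']
    # __X_API_KEY__ is replaced with the real AppSync API key at synth time.
    request['headers']['x-api-key'] = [
        {'key': 'x-api-key', 'value': '__X_API_KEY__'},
    ]
    return request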
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # DDB table to store the Long and Short URLs with Short URL as the partition key
    url_mapping_table = ddb.Table(
        self,
        "url_shortener_mapping_table",
        partition_key=ddb.Attribute(name="short_url", type=ddb.AttributeType.STRING),
        read_capacity=10,
        write_capacity=10,
        removal_policy=core.RemovalPolicy.DESTROY,
    )
    # AutoScaling of RCUs with a Target Utilization of 70%
    url_mapping_table.auto_scale_read_capacity(
        min_capacity=10, max_capacity=40000).scale_on_utilization(
            target_utilization_percent=70)
    # AutoScaling of WCUs with a Target Utilization of 70%
    url_mapping_table.auto_scale_write_capacity(
        min_capacity=10, max_capacity=40000).scale_on_utilization(
            target_utilization_percent=70)

    # DDB table to keep track of an Atomic Counter used for generating Short URLs
    url_counter_table = ddb.Table(
        self,
        "url_shortener_counter_table",
        partition_key=ddb.Attribute(name="id", type=ddb.AttributeType.STRING),
        read_capacity=10,
        write_capacity=10,
        removal_policy=core.RemovalPolicy.DESTROY,
    )
    # AutoScaling of RCUs with a Target Utilization of 70%
    url_counter_table.auto_scale_read_capacity(
        min_capacity=10, max_capacity=40000).scale_on_utilization(
            target_utilization_percent=70)
    # AutoScaling of WCUs with a Target Utilization of 70%
    url_counter_table.auto_scale_write_capacity(
        min_capacity=10, max_capacity=40000).scale_on_utilization(
            target_utilization_percent=70)

    # Lambda function with custom code to handle shortening/unshortening logic
    url_lambda = _lambda.Function(
        self,
        "url_shortener_lambda",
        code=_lambda.Code.asset("lambda_proxy"),
        handler="lambda_function.lambda_handler",
        runtime=_lambda.Runtime.PYTHON_3_8,
        timeout=core.Duration.seconds(10),
        environment={
            "BACKOFF": "25",
            "HASH_DIGEST_SIZE": "8",
            "MAX_RETRIES": "3",
            "URL_SHORTENER_MAPPING_TABLE": url_mapping_table.table_name,
            "URL_SHORTENER_COUNTER_TABLE": url_counter_table.table_name,
        },
        log_retention=logs.RetentionDays.ONE_MONTH,
    )

    # A custom IAM policy statement to grant DDB access to the Lambda function
    ddb_policy_statement = iam.PolicyStatement(
        actions=["dynamodb:PutItem", "dynamodb:GetItem", "dynamodb:UpdateItem"],
        effect=iam.Effect.ALLOW,
        resources=[url_mapping_table.table_arn, url_counter_table.table_arn],
    )
    # Attach the DDB policy statement to the Lambda IAM role
    url_lambda.add_to_role_policy(ddb_policy_statement)

    # Include X-Requested-With in the default CORS headers list
    # (copy rather than append, to avoid mutating the shared Cors.DEFAULT_HEADERS list)
    headers = apigw.Cors.DEFAULT_HEADERS + ['X-Requested-With']

    # API Gateway endpoint to serve Shorten/Unshorten APIs
    url_rest_api = apigw.RestApi(
        self,
        "url_shortener_API",
        default_cors_preflight_options=apigw.CorsOptions(
            allow_origins=apigw.Cors.ALL_ORIGINS,
            allow_headers=headers,
            allow_methods=["POST", "GET", "OPTIONS"],
            status_code=200,
        ),
    )

    # Shorten API using POST and Lambda proxy
    url_rest_api.root.add_resource(path_part="shorten").add_method(
        http_method="POST",
        request_models={
            "application/json": apigw.Model.EMPTY_MODEL,
        },
        integration=apigw.LambdaIntegration(
            handler=url_lambda,
            proxy=True,
            allow_test_invoke=True,
        ),
    )

    # Unshorten API using GET and Lambda proxy
    url_rest_api.root.add_resource(path_part="unshorten").add_resource(
        path_part="{shorturl}").add_method(
            http_method="GET",
            request_models={
                "application/json": apigw.Model.EMPTY_MODEL,
            },
            integration=apigw.LambdaIntegration(
                handler=url_lambda,
                proxy=True,
                allow_test_invoke=True,
            ),
        )

    # S3 bucket to host the URL Shortener static website
    s3_web_hosting = s3.Bucket(
        self,
"url_shortener_web_hosting_bucket", website_index_document="index.html", ) # Uploading HTML and ICO files from local directory to S3 Static Website bucket s3_deploy = s3deploy.BucketDeployment( self, "website_source_files", sources=[s3deploy.Source.asset(path="website", )], destination_bucket=s3_web_hosting, ) # Lambda function to integrate the API GW Shorten endpoint with the HTML file stored in S3 cr_provider = _lambda.Function( self, "cr_provider", code=_lambda.Code.asset("custom_resource"), handler="lambda_function.lambda_handler", runtime=_lambda.Runtime.PYTHON_3_8, timeout=core.Duration.minutes(1), ) # A Custom IAM Policy statement to grant S3 access to the Lambda function lambda_cr_statement = iam.PolicyStatement( actions=["s3:List*", "s3:Get*", "s3:Put*"], effect=iam.Effect.ALLOW, resources=[ s3_web_hosting.bucket_arn, s3_web_hosting.bucket_arn + "/*" ]) cr_provider.add_to_role_policy(lambda_cr_statement) # CFN Custom Resource backed by Lambda lambda_cr = core.CustomResource( self, "lambda_cr", service_token=cr_provider.function_arn, properties={ "S3_BUCKET": s3_web_hosting.bucket_name, "S3_KEY": "index.html", "POST_URL": url_rest_api.url + "shorten", }, removal_policy=core.RemovalPolicy.DESTROY, ) # Adding dependency so that Custom Resource creation happens after files are uploaded to S3 lambda_cr.node.add_dependency(s3_deploy) # CloudFront Distribution with S3 and APIGateway origins url_cf_distribution = cf.CloudFrontWebDistribution( self, "url_shortener_distribution", origin_configs=[ cf.SourceConfiguration(s3_origin_source=cf.S3OriginConfig( s3_bucket_source=s3_web_hosting, origin_access_identity=cf.OriginAccessIdentity( self, id="OAI", comment= "OAI that allows CloudFront to access the S3 bucket"), ), behaviors=[ cf.Behavior( is_default_behavior=False, path_pattern="/index.html", ), cf.Behavior( is_default_behavior=False, path_pattern="/favicon.ico", ), ]), cf.SourceConfiguration( custom_origin_source=cf.CustomOriginConfig( domain_name=url_rest_api.url.lstrip("https://").split( "/")[0], ), origin_path="/" + url_rest_api.deployment_stage.stage_name + "/unshorten", behaviors=[ cf.Behavior( is_default_behavior=True, allowed_methods=cf.CloudFrontAllowedMethods. GET_HEAD_OPTIONS, ) ]) ], price_class=cf.PriceClass.PRICE_CLASS_ALL, default_root_object="index.html", ) # Adding the CloudFront Distribution endpoint to CFN Output core.CfnOutput( self, "URLShortenerWebsite", value=url_cf_distribution.domain_name, )
def __init__(self, scope: core.Construct, id: str, s3bucket, acmcert, hostedzone,
             alb: elbv2.ApplicationLoadBalancer, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    prj_name = self.node.try_get_context("project_name")
    env_name = self.node.try_get_context("env")
    domain_name = self.node.try_get_context("domain_name")

    bucket = s3.Bucket.from_bucket_name(self, 's3bucket', s3bucket)
    path_patterns = ["/static/*", "/templates/*"]

    self.cdn_id = cdn.CloudFrontWebDistribution(
        self, 'webhosting-cdn',
        origin_configs=[
            cdn.SourceConfiguration(
                # origin_path="/",
                s3_origin_source=cdn.S3OriginConfig(
                    s3_bucket_source=bucket,
                    origin_access_identity=cdn.OriginAccessIdentity(
                        self, 'webhosting-origin')),
                behaviors=[
                    cdn.Behavior(
                        path_pattern=path_pattern,
                        allowed_methods=cdn.CloudFrontAllowedMethods.ALL,
                        cached_methods=cdn.CloudFrontAllowedCachedMethods.GET_HEAD,
                    ) for path_pattern in path_patterns
                ],
            ),
            cdn.SourceConfiguration(
                custom_origin_source=cdn.CustomOriginConfig(
                    domain_name=alb.load_balancer_dns_name,
                    origin_protocol_policy=cdn.OriginProtocolPolicy.MATCH_VIEWER),
                behaviors=[
                    cdn.Behavior(
                        is_default_behavior=True,
                        allowed_methods=cdn.CloudFrontAllowedMethods.ALL,
                        forwarded_values={
                            "query_string": True,
                            "cookies": {"forward": "all"},
                            "headers": ['*'],
                        },
                    )
                ])
        ],
        error_configurations=[
            cdn.CfnDistribution.CustomErrorResponseProperty(
                error_code=400, response_code=200, response_page_path="/"),
            cdn.CfnDistribution.CustomErrorResponseProperty(
                error_code=403, response_code=200, response_page_path="/"),
            cdn.CfnDistribution.CustomErrorResponseProperty(
                error_code=404, response_code=200, response_page_path="/"),
        ],
        alias_configuration=cdn.AliasConfiguration(
            acm_cert_ref=acmcert.certificate_arn,
            names=[env_name + '.' + domain_name]))

    r53.ARecord(
        self, 'dev-record',
        zone=hostedzone,
        target=r53.RecordTarget.from_alias(
            alias_target=r53targets.CloudFrontTarget(self.cdn_id)),
        record_name='dev')

    ssm.StringParameter(
        self, 'cdn-dist-id',
        parameter_name='/' + env_name + '/app-distribution-id',
        string_value=self.cdn_id.distribution_id)
    ssm.StringParameter(
        self, 'cdn-url',
        parameter_name='/' + env_name + '/app-cdn-url',
        string_value='https://' + self.cdn_id.domain_name)