def __init__(self, scope: Construct, id: str, settings, **kwargs) -> None:
    """Provision the Slack OAuth flow: a DynamoDB state table, the OAuth
    Lambda, and a regional API Gateway exposing /oauth2 on a named stage.

    :param scope: Parent CDK construct.
    :param id: Construct id, also used as prefix for resource names.
    :param settings: App configuration (source of channel/team ids).
    """
    super().__init__(scope, id, **kwargs)
    self.id = id

    # Stage name is injected at deploy time:
    #   cdk deploy --parameters StageName=v1
    stage = CfnParameter(
        self,
        "StageName",
        default="v1",
        description="The name of the API Gateway Stage.",
        type="String",
    ).value_as_string

    table_name = f"{id}Table"

    # Create a dynamodb table to hold OAuth state
    table = self.create_dynamodb_table(table_name)

    # Create function and role for OAuth; the role is scoped to the table.
    func_oauth_role = self.create_func_oauth_execution_role(
        f"{id}-OAuth", table_arn=table.table_arn)
    func_oauth = self.create_lambda("OAuth", custom_role=func_oauth_role)
    func_oauth.add_environment("SlackAppClientIdParameterKey",
                               CLIENT_ID_PARAMETER_NAME)
    func_oauth.add_environment("SlackAppClientSecretParameterKey",
                               CLIENT_SECRET_PARAMETER_NAME)
    func_oauth.add_environment("SlackAppOAuthDynamoDBTable", table_name)
    func_oauth.add_environment("SlackChannelIds",
                               ",".join(get_channel_ids(settings)))
    func_oauth.add_environment("SlackTeamIds",
                               ",".join(get_team_ids(settings)))

    # deploy=False: the Deployment/Stage pair is managed explicitly below.
    api = apigw_.LambdaRestApi(
        self,
        f"{id}-API",
        description=f"{id} API",
        endpoint_configuration=apigw_.EndpointConfiguration(
            types=[apigw_.EndpointType.REGIONAL]),
        handler=func_oauth,
        deploy=False,
        proxy=False,
    )
    item = api.root.add_resource("oauth2")
    item.add_method("ANY", apigw_.LambdaIntegration(func_oauth))

    # Create APIGW Loggroup up-front so we control its retention
    LogGroup(
        self,
        f"{id}-API-LogGroup",
        log_group_name=f"API-Gateway-Execution-Logs_{api.rest_api_id}/{stage}",
        retention=RetentionDays.ONE_DAY,
    )

    # Do a new deployment on the specific stage
    new_deployment = apigw_.Deployment(self, f"{id}-API-Deployment", api=api)
    apigw_.Stage(
        self,
        f"{id}-API-Stage",
        # Data tracing logs full request/response payloads to CloudWatch;
        # disabled because the OAuth exchange carries the Slack client
        # secret and tokens (also matches the sibling stack's stage config).
        data_trace_enabled=False,
        description=f"{stage} environment",
        deployment=new_deployment,
        logging_level=apigw_.MethodLoggingLevel.INFO,
        metrics_enabled=True,
        stage_name=stage,
        tracing_enabled=False,
    )
def __init__(self, scope: Construct, id: str, settings, **kwargs) -> None:
    """Wire up the slash-command entry point: an ImmediateResponse Lambda
    fronted by an edge-optimized API Gateway, plus the Async/Sync worker
    functions it dispatches to, deployed onto a parameterized stage.

    :param scope: Parent CDK construct.
    :param id: Construct id, also used as prefix for resource names.
    :param settings: App configuration (SSM keys, command, channel/team ids).
    """
    super().__init__(scope, id, **kwargs)
    self.id = id

    # Stage name is injected at deploy time:
    #   cdk deploy --parameters StageName=v1
    stage = CfnParameter(
        self,
        "StageName",
        default="v1",
        description="The name of the API Gateway Stage.",
        type="String",
    ).value_as_string

    # Worker functions invoked downstream (default execution roles).
    self.func_async_worker = self.create_lambda("AsyncWorker",
                                                custom_role=None)
    self.func_sync_worker = self.create_lambda("SyncWorker", custom_role=None)

    # ImmediateResponse gets a dedicated role tied to the Slack token key.
    responder_role = self.create_immediate_response_execution_role(
        f"{id}-ImmediateResponse", settings["ssmparametertokenkey"])
    responder = self.create_lambda("ImmediateResponse",
                                   custom_role=responder_role)

    # Hand the responder everything it needs to validate and dispatch.
    for env_key, env_value in {
            "SlackAppTokenParameterKey": settings["ssmparametertokenkey"],
            "SlackCommand": settings["command"],
            "AsyncWorkerLambdaFunctionName": f"{id}-AsyncWorker",
            "SyncWorkerLambdaFunctionName": f"{id}-SyncWorker",
            "SlackChannelIds": ",".join(get_channel_ids(settings)),
            "SlackDomains": ",".join(get_team_domains(settings)),
            "SlackTeamIds": ",".join(get_team_ids(settings)),
    }.items():
        responder.add_environment(env_key, env_value)

    # deploy=False: the Deployment/Stage pair is managed explicitly below.
    rest_api = apigw_.LambdaRestApi(
        self,
        f"{id}-API",
        description=f"{id} API",
        endpoint_configuration=apigw_.EndpointConfiguration(
            types=[apigw_.EndpointType.EDGE]),
        handler=responder,
        deploy=False,
    )

    # Pre-create the execution log group so its retention is one day.
    LogGroup(
        self,
        f"{id}-API-LogGroup",
        log_group_name=(
            f"API-Gateway-Execution-Logs_{rest_api.rest_api_id}/{stage}"),
        retention=RetentionDays.ONE_DAY,
    )

    # Deploy the API onto the requested stage.
    deployment = apigw_.Deployment(self, f"{id}-API-Deployment", api=rest_api)
    apigw_.Stage(
        self,
        f"{id}-API-Stage",
        data_trace_enabled=False,
        description=f"{stage} environment",
        deployment=deployment,
        logging_level=apigw_.MethodLoggingLevel.ERROR,
        metrics_enabled=True,
        stage_name=stage,
        tracing_enabled=False,
    )
def __init__(self,
             scope: core.Construct,
             id: str,
             resources: FsiSharedResources,
             subnet_group_name: str = 'Default',
             **kwargs) -> None:
    """Deploy the Earnings Calendar service: a container-image Lambda in the
    landing-zone VPC, a DynamoDB cache table, an IP-restricted API Gateway
    proxy on earnings.trader.fsi, and its Route53 alias record.

    :param scope: Parent CDK construct.
    :param id: Construct id.
    :param resources: Shared FSI resources (landing zone, DNS zone, ...).
    :param subnet_group_name: Landing-zone subnet group for the function.
    """
    super().__init__(scope, id, **kwargs)

    # Build the container image and pin the Lambda to its exact tag...
    self.repo = assets.DockerImageAsset(self,
                                        'Repo',
                                        directory='src/fsi/earnings',
                                        file='Dockerfile')
    image_code = lambda_.DockerImageCode.from_ecr(
        repository=self.repo.repository,
        tag=self.repo.image_uri.split(':')[-1])

    # Execution role with VPC access...
    zone_name = resources.landing_zone.zone_name
    lambda_role = iam.Role(
        self,
        'Role',
        assumed_by=iam.ServicePrincipal(service='lambda'),
        description=f'HomeNet-{zone_name}-Fsi-EarningsReport',
        role_name=(f'fsi-earnings@homenet.{zone_name}.'
                   f'{core.Stack.of(self).region}').lower(),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name=(
                    'service-role/AWSLambdaVPCAccessExecutionRole')),
        ])

    # Cache table for earnings data; the function may read and write it.
    self.earnings_table = d.Table(
        self,
        'EarningCalendar',
        table_name='FsiCoreSvc-EarningsCalendar',
        billing_mode=d.BillingMode.PAY_PER_REQUEST,
        partition_key=d.Attribute(name='PartitionKey',
                                  type=d.AttributeType.STRING),
        sort_key=d.Attribute(name='SortKey', type=d.AttributeType.STRING),
        time_to_live_attribute='Expiration',
        point_in_time_recovery=True,
        server_side_encryption=True)
    self.earnings_table.grant_read_write_data(lambda_role)

    # Environment handed to the function.
    self.function_env = {
        'CACHE_TABLE': self.earnings_table.table_name,
    }

    # The backing web-api compute...
    self.function = lambda_.DockerImageFunction(
        self,
        'Function',
        code=image_code,
        role=lambda_role,
        function_name=(
            f'HomeNet-{zone_name}-Fsi-{FsiEarningsGateway.__name__}'),
        description=f'Python Lambda function for {FsiEarningsGateway.__name__}',
        timeout=core.Duration.seconds(30),
        tracing=lambda_.Tracing.ACTIVE,
        vpc=resources.landing_zone.vpc,
        log_retention=logs.RetentionDays.FIVE_DAYS,
        memory_size=128,
        allow_all_outbound=True,
        vpc_subnets=ec2.SubnetSelection(subnet_group_name=subnet_group_name),
        security_groups=[resources.landing_zone.security_group],
        environment=self.function_env,
    )

    # Bind API Gateway to the Lambda; invocation is limited to the listed
    # private ranges plus one external address.
    self.frontend_proxy = a.LambdaRestApi(
        self,
        'ApiGateway',
        proxy=True,
        handler=self.function,
        options=a.RestApiProps(
            description=(f'Hosts the Earnings Calendar Services via '
                         f'{self.function.function_name}'),
            domain_name=a.DomainNameOptions(
                domain_name='earnings.trader.fsi',
                certificate=Certificate.from_certificate_arn(
                    self,
                    'Certificate',
                    certificate_arn=(
                        'arn:aws:acm:us-east-2:581361757134:certificate/'
                        '4e3235f7-49a1-42a5-a671-f2449b45f72d')),
                security_policy=a.SecurityPolicy.TLS_1_0),
            policy=iam.PolicyDocument(statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=['execute-api:Invoke'],
                    principals=[iam.AnyPrincipal()],
                    resources=['*'],
                    conditions={
                        'IpAddress': {
                            'aws:SourceIp': [
                                '10.0.0.0/8',
                                '192.168.0.0/16',
                                '72.90.160.65/32',
                            ]
                        }
                    })
            ]),
            endpoint_configuration=a.EndpointConfiguration(
                types=[a.EndpointType.REGIONAL])))

    # Register the DNS alias for the gateway.
    r53.ARecord(
        self,
        'AliasRecord',
        zone=resources.trader_dns_zone,
        record_name=f'earnings.{resources.trader_dns_zone.zone_name}',
        target=r53.RecordTarget.from_alias(
            dns_targets.ApiGateway(self.frontend_proxy)))
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    """Build a private, VPC-only static website: S3 content served through a
    PRIVATE API Gateway reachable solely via an interface VPC endpoint, with
    a bastion host in a peered "client" VPC for testing access over SSM.

    :param scope: Parent CDK construct.
    :param construct_id: Construct id.
    """
    super().__init__(scope, construct_id, **kwargs)

    # Create two VPCs - one to host our private website, the other to act
    # as a client
    website_vpc = Vpc(
        self,
        "WEBSITEVPC",
        cidr="10.0.0.0/16",
    )
    client_vpc = Vpc(
        self,
        "ClientVPC",
        cidr="10.1.0.0/16",
    )

    # Create a bastion host in the client VPC which will act like our
    # client workstation
    bastion = BastionHostLinux(
        self,
        "WEBClient",
        vpc=client_vpc,
        instance_name='my-bastion',
        instance_type=InstanceType('t3.micro'),
        machine_image=AmazonLinuxImage(),
        subnet_selection=SubnetSelection(subnet_type=SubnetType.PRIVATE),
        security_group=SecurityGroup(
            scope=self,
            id='bastion-sg',
            security_group_name='bastion-sg',
            description=
            'Security group for the bastion, no inbound open because we should access'
            ' to the bastion via AWS SSM',
            vpc=client_vpc,
            allow_all_outbound=True))

    # Set up a VPC peering connection between client and API VPCs, and adjust
    # the routing table to allow connections back and forth
    VpcPeeringHelper(self, 'Peering', website_vpc, client_vpc)

    # Create VPC endpoints for API gateway
    vpc_endpoint = InterfaceVpcEndpoint(
        self,
        'APIGWVpcEndpoint',
        vpc=website_vpc,
        service=InterfaceVpcEndpointAwsService.APIGATEWAY,
        private_dns_enabled=True,
    )
    vpc_endpoint.connections.allow_from(bastion, Port.tcp(443))
    endpoint_id = vpc_endpoint.vpc_endpoint_id

    # Classic private-API policy pair: deny any invocation that did not
    # arrive through our VPC endpoint, allow everything else.
    api_policy = iam.PolicyDocument(statements=[
        iam.PolicyStatement(principals=[iam.AnyPrincipal()],
                            actions=['execute-api:Invoke'],
                            resources=['execute-api:/*'],
                            effect=iam.Effect.DENY,
                            conditions={
                                "StringNotEquals": {
                                    "aws:SourceVpce": endpoint_id
                                }
                            }),
        iam.PolicyStatement(principals=[iam.AnyPrincipal()],
                            actions=['execute-api:Invoke'],
                            resources=['execute-api:/*'],
                            effect=iam.Effect.ALLOW)
    ])

    # Create an s3 bucket to hold the content
    content_bucket = s3.Bucket(self,
                               "ContentBucket",
                               removal_policy=core.RemovalPolicy.DESTROY)

    # Upload our static content to the bucket
    s3dep.BucketDeployment(self,
                           "DeployWithInvalidation",
                           sources=[s3dep.Source.asset('website')],
                           destination_bucket=content_bucket)

    # Create a private API GW in the API VPC
    api = apigw.RestApi(self,
                        'PrivateS3Api',
                        endpoint_configuration=apigw.EndpointConfiguration(
                            types=[apigw.EndpointType.PRIVATE],
                            vpc_endpoints=[vpc_endpoint]),
                        policy=api_policy)

    # Create a role to allow API GW to access our S3 bucket contents
    role = iam.Role(
        self,
        "Role",
        assumed_by=iam.ServicePrincipal("apigateway.amazonaws.com"))
    role.add_to_policy(
        iam.PolicyStatement(effect=iam.Effect.ALLOW,
                            resources=[
                                content_bucket.bucket_arn,
                                content_bucket.bucket_arn + '/*'
                            ],
                            actions=["s3:Get*"]))

    # Create a proxy resource that captures all non-root resource requests
    resource = api.root.add_resource("{proxy+}")

    # Create an integration with S3
    resource_integration = apigw.Integration(
        type=apigw.IntegrationType.AWS,
        integration_http_method='GET',
        options=apigw.IntegrationOptions(
            request_parameters={
                # map the proxy parameter so we can pass the request path
                "integration.request.path.proxy": "method.request.path.proxy"
            },
            integration_responses=[
                apigw.IntegrationResponse(
                    status_code='200',
                    response_parameters={
                        # map the content type of the S3 object back to the
                        # HTTP response
                        "method.response.header.Content-Type":
                        "integration.response.header.Content-Type"
                    })
            ],
            credentials_role=role),
        # reference the bucket content we want to retrieve; use the stack's
        # region (CFN pseudo parameter) instead of hard-coding one so the
        # template deploys in any region
        uri='arn:aws:apigateway:%s:s3:path/%s/{proxy}' %
        (core.Aws.REGION, content_bucket.bucket_name))

    # handle the GET request and map it to our new integration
    resource.add_method(
        "GET",
        resource_integration,
        method_responses=[
            apigw.MethodResponse(status_code='200',
                                 response_parameters={
                                     "method.response.header.Content-Type":
                                     False
                                 })
        ],
        request_parameters={"method.request.path.proxy": True})

    # Handle requests to the root of our site
    # Create another integration with S3 - this time with no proxy parameter
    resource_integration = apigw.Integration(
        type=apigw.IntegrationType.AWS,
        integration_http_method='GET',
        options=apigw.IntegrationOptions(
            integration_responses=[
                apigw.IntegrationResponse(
                    status_code='200',
                    response_parameters={
                        # map the content type of the S3 object back to the
                        # HTTP response
                        "method.response.header.Content-Type":
                        "integration.response.header.Content-Type"
                    })
            ],
            credentials_role=role),
        # reference the bucket content we want to retrieve (site index page)
        uri='arn:aws:apigateway:%s:s3:path/%s/index.html' %
        (core.Aws.REGION, content_bucket.bucket_name))

    # handle the GET request and map it to our new integration
    api.root.add_method("GET",
                        resource_integration,
                        method_responses=[
                            apigw.MethodResponse(
                                status_code='200',
                                response_parameters={
                                    "method.response.header.Content-Type":
                                    False
                                })
                        ])
def __init__(self,
             scope: core.Construct,
             id: str,
             infra: RtspBaseResourcesConstruct,
             subnet_group_name: str = 'Default',
             **kwargs) -> None:
    """Deploy the Photo API: a container-image Lambda inside the landing-zone
    VPC with read access to the shared bucket and read/write access to the
    face table, exposed through an IP-restricted API Gateway at
    photos-api.virtual.world.

    :param scope: Parent CDK construct.
    :param id: Construct id.
    :param infra: Shared RTSP resources (landing zone, bucket, face table).
    :param subnet_group_name: Landing-zone subnet group for the function.
    """
    super().__init__(scope, id, **kwargs)
    core.Tags.of(self).add(key='Source', value=PhotosApiConstruct.__name__)

    # Configure the container resources...
    self.repo = assets.DockerImageAsset(self,
                                        'Repo',
                                        directory='src/rtsp/photo-api',
                                        file='Dockerfile')
    # Pin the Lambda image to the exact tag produced for this asset build.
    code = lambda_.DockerImageCode.from_ecr(
        repository=self.repo.repository,
        tag=self.repo.image_uri.split(':')[-1])

    # Configure security policies...
    role = iam.Role(
        self,
        'Role',
        assumed_by=iam.ServicePrincipal(service='lambda'),
        description='HomeNet-{}-PhotoApi'.format(infra.landing_zone.zone_name),
        role_name='rtsp-photoapi@homenet.{}.{}'.format(
            infra.landing_zone.zone_name,
            core.Stack.of(self).region),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name=
                'service-role/AWSLambdaVPCAccessExecutionRole'),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name='AmazonS3ReadOnlyAccess')
        ])
    # The function reads photos from the bucket and tracks faces in Dynamo.
    infra.bucket.grant_read(role)
    infra.face_table.grant_read_write_data(role)

    # Define any variables for the function
    self.function_env = {
        'FACE_TABLE': infra.face_table.table_name,
        'REGION': core.Stack.of(self).region,
    }

    # Create the backing webapi compute ...
    self.function = lambda_.DockerImageFunction(
        self,
        'Function',
        code=code,
        role=role,
        function_name='HomeNet-PhotoApi',
        description='Python Lambda function for ' + PhotosApiConstruct.__name__,
        timeout=core.Duration.seconds(30),
        tracing=lambda_.Tracing.ACTIVE,
        vpc=infra.landing_zone.vpc,
        log_retention=RetentionDays.FIVE_DAYS,
        memory_size=128,
        allow_all_outbound=True,
        vpc_subnets=ec2.SubnetSelection(subnet_group_name=subnet_group_name),
        security_groups=[infra.security_group],
        environment=self.function_env,
    )

    # Bind APIG to Lambda compute...
    # Calls need to use https://photos-api.virtual.world
    self.frontend_proxy = a.LambdaRestApi(
        self,
        'ApiGateway',
        proxy=True,
        handler=self.function,
        options=a.RestApiProps(
            description='Photo-Api proxy for ' + self.function.function_name,
            # Declared binary so image payloads are not base64-mangled.
            binary_media_types=['image/png', 'image/jpg', 'image/bmp'],
            domain_name=a.DomainNameOptions(
                domain_name='photos-api.virtual.world',
                certificate=Certificate.from_certificate_arn(
                    self,
                    'Certificate',
                    certificate_arn=
                    'arn:aws:acm:us-east-1:581361757134:certificate/c91263e7-882e-441d-aa2f-717074aed6d0'
                ),
                # NOTE(review): TLS 1.0 is deprecated; consider TLS_1_2
                # unless legacy clients depend on the older policy.
                security_policy=a.SecurityPolicy.TLS_1_0),
            # Only private-network ranges plus one external /32 may invoke.
            policy=iam.PolicyDocument(statements=[
                iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                    actions=['execute-api:Invoke'],
                                    principals=[iam.AnyPrincipal()],
                                    resources=['*'],
                                    conditions={
                                        'IpAddress': {
                                            'aws:SourceIp': [
                                                '10.0.0.0/8',
                                                '192.168.0.0/16',
                                                '72.90.160.65/32'
                                            ]
                                        }
                                    })
            ]),
            endpoint_configuration=a.EndpointConfiguration(
                types=[a.EndpointType.REGIONAL],
                #vpc_endpoints=[
                # infra.landing_zone.vpc_endpoints.interfaces['execute-api']
                #]
            )))
def __init__(self, scope: core.Construct, id: str, resources:FsiSharedResources, subnet_group_name:str='Default', **kwargs) -> None: super().__init__(scope, id, **kwargs) # Configure the container resources... self.repo = assets.DockerImageAsset(self,'Repo', directory='src/fsi/account-linking', file='Dockerfile') code = lambda_.DockerImageCode.from_ecr( repository=self.repo.repository, tag=self.repo.image_uri.split(':')[-1]) # Configure security policies... role = iam.Role(self,'Role', assumed_by=iam.ServicePrincipal(service='lambda'), description='HomeNet-{}-Fsi-AccountLinking'.format(resources.landing_zone.zone_name), role_name='fsi-accountlinking@homenet.{}.{}'.format( resources.landing_zone.zone_name, core.Stack.of(self).region), managed_policies=[ iam.ManagedPolicy.from_aws_managed_policy_name( managed_policy_name='service-role/AWSLambdaVPCAccessExecutionRole'), ]) # Grant any permissions... resources.tda_secret.grant_write(role) # Define any variables for the function self.function_env = { 'REGION': core.Stack.of(self).region, 'TDA_SECRET_ID': resources.tda_secret.secret_arn, 'TDA_REDIRECT_URI': ssm.StringParameter.from_string_parameter_name(self,'TDA_REDIRECT_URI', string_parameter_name='/HomeNet/Amertitrade/redirect_uri').string_value, 'TDA_CLIENT_ID': ssm.StringParameter.from_string_parameter_name(self, 'TDA_CLIENT_ID', string_parameter_name='/HomeNet/Ameritrade/client_id').string_value } # Create the backing webapi compute ... 
self.function = lambda_.DockerImageFunction(self,'Function', code = code, role= role, function_name='HomeNet-{}-Fsi-{}'.format( resources.landing_zone.zone_name, FsiAmeritradeAuthGateway.__name__), description='Python Lambda function for '+FsiAmeritradeAuthGateway.__name__, timeout= core.Duration.seconds(30), tracing= lambda_.Tracing.ACTIVE, vpc= resources.landing_zone.vpc, log_retention= logs.RetentionDays.FIVE_DAYS, memory_size=128, allow_all_outbound=True, vpc_subnets=ec2.SubnetSelection(subnet_group_name=subnet_group_name), security_groups=[resources.landing_zone.security_group], environment=self.function_env, ) # Bind APIG to Lambda compute... self.frontend_proxy = a.LambdaRestApi(self,'ApiGateway', proxy=True, handler=self.function, options=a.RestApiProps( description='Hosts the Ameritrade Auth Callback via '+self.function.function_name, domain_name= a.DomainNameOptions( domain_name='auth.trader.fsi', certificate=Certificate.from_certificate_arn(self,'Certificate', certificate_arn= 'arn:aws:acm:us-east-2:581361757134:certificate/0d1fc756-ebd6-4660-83a8-814c0976a8c2'), security_policy= a.SecurityPolicy.TLS_1_0), policy= iam.PolicyDocument( statements=[ iam.PolicyStatement( effect= iam.Effect.ALLOW, actions=['execute-api:Invoke'], principals=[iam.AnyPrincipal()], resources=['*'], conditions={ 'IpAddress':{ 'aws:SourceIp': ['10.0.0.0/8','192.168.0.0/16','72.90.160.65/32'] } } ) ] ), endpoint_configuration= a.EndpointConfiguration( types = [ a.EndpointType.REGIONAL], ) )) # Register Dns Name r53.ARecord(self,'AliasRecord', zone=resources.trader_dns_zone, record_name='auth.%s' % resources.trader_dns_zone.zone_name, target= r53.RecordTarget.from_alias(dns_targets.ApiGateway(self.frontend_proxy)))