def __init__(self, scope: core.Construct, id: str, domain: es.CfnDomain, **kwargs) -> None:
    """Enable UltraWarm storage on an existing Elasticsearch domain.

    Warm-node settings are applied through an AwsCustomResource calling
    ``updateElasticsearchDomainConfig``; warm count/type come from the
    CDK context key ``elastic`` (``{"warm": {"count": ..., "type": ...}}``).

    :param scope: parent construct.
    :param id: construct id.
    :param domain: the existing Elasticsearch domain to reconfigure.
    """
    super().__init__(scope, id, **kwargs)
    # Read the context once instead of performing the same
    # try_get_context lookup twice (as the original did).
    warm_config = self.node.try_get_context("elastic")["warm"]
    arn = sdk.AwsCustomResource(
        self,
        'esConfig',
        # Scope the auto-generated policy to this domain only.
        policy=sdk.AwsCustomResourcePolicy.from_sdk_calls(
            resources=[domain.domain_arn]),
        on_create=sdk.AwsSdkCall(
            action='updateElasticsearchDomainConfig',
            service='ES',
            physical_resource_id=sdk.PhysicalResourceId.of(
                "updateElasticsearchDomainConfig"),
            output_path="DomainConfig.ElasticsearchClusterConfig",
            parameters={
                "DomainName": domain.domain_name,
                "ElasticsearchClusterConfig": {
                    "WarmCount": warm_config["count"],
                    "WarmEnabled": True,
                    "WarmType": warm_config["type"]
                }
            },
        ),
    )
    # The SDK call must not run before the domain itself exists.
    arn.node.add_dependency(domain)
def create_userpool_client_secret(
        self,
        user_pool: aws_cognito.CfnUserPool,
        user_pool_client: aws_cognito.CfnUserPoolClient,
        tag: str,
) -> custom_resources.AwsCustomResource:
    """
    :return: an AwsCustomResource that provides access to the user pool
        client secret in the response field `user_pool_client_secret`
    """
    # describeUserPoolClient is read-only, and the original on_create and
    # on_update AwsSdkCalls were byte-identical — build the call once and
    # reuse it for both lifecycle events.
    describe_client_call = custom_resources.AwsSdkCall(
        physical_resource_id=custom_resources.PhysicalResourceId.of(
            user_pool_client.ref),
        service="CognitoIdentityServiceProvider",
        action="describeUserPoolClient",
        # Only expose the secret field to consumers of this resource.
        output_path="UserPoolClient.ClientSecret",
        parameters={
            "ClientId": user_pool_client.ref,
            "UserPoolId": user_pool.ref
        },
    )
    resource = custom_resources.AwsCustomResource(
        self,
        f"userpool_client_secret_{tag}",
        resource_type="Custom::UserPoolClientSecret",
        # Least-privilege policy: describe only this user pool's clients.
        policy=custom_resources.AwsCustomResourcePolicy.from_statements([
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                actions=["cognito-idp:DescribeUserPoolClient"],
                resources=[
                    f"arn:aws:cognito-idp:{self.region}:{self.account}:userpool/{user_pool.ref}"  # noqa: E501
                ],
            )
        ]),
        on_create=describe_client_call,
        on_update=describe_client_call,
    )
    return resource
def get_on_create_update(self, codebuild_name):
    """Return the AwsSdkCall that starts a build of the named CodeBuild project."""
    # api_version=None uses the latest api
    return custom_resources.AwsSdkCall(
        action='startBuild',
        service='CodeBuild',
        parameters={"projectName": codebuild_name},
        physical_resource_id=custom_resources.PhysicalResourceId.of(
            f'{codebuild_name}-CR'),
    )
def __init__(self, scope: core.Construct, id: str, props: ParameterReaderProps, **kwargs) -> None:
    """Read an SSM parameter — possibly from another region — via a custom resource.

    The result is exposed on ``self.resource`` (use get_response_field to
    extract the parameter value).
    """
    super().__init__(scope, id, **kwargs)
    # Timestamp-based physical id means the parameter is re-read on every
    # deployment rather than cached from the first one.
    get_parameter_call = cr.AwsSdkCall(
        service='SSM',
        action='getParameter',
        parameters={
            'Name': props.parameterName,
            'WithDecryption': props.with_decryption,
        },
        region=props.region,
        physical_resource_id=cr.PhysicalResourceId.of(id=str(datetime.now())),
    )
    self.resource = cr.AwsCustomResource(
        self,
        'get_parameters',
        policy=cr.AwsCustomResourcePolicy.from_sdk_calls(
            resources=cr.AwsCustomResourcePolicy.ANY_RESOURCE),
        on_create=get_parameter_call,
    )
def get_on_create_update(self, eks_name):
    """Return the AwsSdkCall enabling every control-plane log type on an EKS cluster."""
    all_log_types = [
        "api", "audit", "authenticator", "controllerManager", "scheduler"
    ]
    update_params = {
        "name": eks_name,
        "logging": {
            "clusterLogging": [{
                "enabled": True,
                "types": all_log_types
            }]
        }
    }
    # api_version=None uses the latest api
    return custom_resources.AwsSdkCall(
        action='updateClusterConfig',
        service='EKS',
        parameters=update_params,
        physical_resource_id=custom_resources.PhysicalResourceId.of(
            f'{eks_name}Log-CR'))
def __init__(
    self,
    scope: cdk.Construct,
    id: str,
    consoleme_alb: lb.ApplicationLoadBalancer,
    **kwargs
) -> None:
    """DNS + email + TLS plumbing for the ConsoleMe ALB.

    Creates an alias A record for the ALB in an existing public hosted
    zone, verifies the record's domain as an SES identity (with DKIM),
    and requests a DNS-validated wildcard ACM certificate for the zone.
    Exposes ``hosted_zone``, ``certificate`` and ``route53_record``.
    """
    super().__init__(scope, id, **kwargs)
    # Existing zone is imported by id/name (module-level constants), not created.
    hosted_zone = route53.PublicHostedZone.from_hosted_zone_attributes(
        self,
        "HostedZone",
        hosted_zone_id=HOSTED_ZONE_ID,
        zone_name=HOSTED_ZONE_NAME,
    )
    # Alias record pointing APPLICATION_PREFIX.<zone> at the load balancer.
    route53_record = route53.ARecord(
        self,
        "LBRecord",
        zone=hosted_zone,
        record_name=APPLICATION_PREFIX,
        target=route53.RecordTarget(
            alias_target=(route53_targets.LoadBalancerTarget(consoleme_alb))
        ),
    )
    # SES domain identity verification. on_delete removes the identity so the
    # stack cleans up after itself; the verification token is the physical id.
    verify_ses_identity = cr.AwsCustomResource(
        self,
        "VerifySESIdentityResource",
        policy=cr.AwsCustomResourcePolicy.from_statements(
            statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=["ses:VerifyDomainIdentity", "ses:DeleteIdentity"],
                    resources=["*"],
                )
            ]
        ),
        on_create=cr.AwsSdkCall(
            service="SES",
            action="verifyDomainIdentity",
            parameters={"Domain": route53_record.domain_name},
            physical_resource_id=cr.PhysicalResourceId.from_response(
                "VerificationToken"
            ),
        ),
        on_delete=cr.AwsSdkCall(
            service="SES",
            action="deleteIdentity",
            parameters={"Identity": route53_record.domain_name},
        ),
        install_latest_aws_sdk=True,
        log_retention=logs.RetentionDays.ONE_WEEK,
    )
    # DKIM setup for the same domain; depends on the identity existing first.
    add_ses_dkim = cr.AwsCustomResource(
        self,
        "VerifySESDKIMResource",
        policy=cr.AwsCustomResourcePolicy.from_statements(
            statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=["ses:VerifyDomainDkim"],
                    resources=["*"],
                )
            ]
        ),
        on_create=cr.AwsSdkCall(
            service="SES",
            action="verifyDomainDkim",
            parameters={"Domain": route53_record.domain_name},
            physical_resource_id=cr.PhysicalResourceId.of(
                HOSTED_ZONE_ID + "VerifyDomainDKIM"
            ),
        ),
        install_latest_aws_sdk=True,
        log_retention=logs.RetentionDays.ONE_WEEK,
    )
    add_ses_dkim.node.add_dependency(verify_ses_identity)
    # Wildcard certificate for the zone, validated automatically via DNS.
    certificate = acm.Certificate(
        self,
        "Certificate",
        domain_name="*." + hosted_zone.zone_name,
        validation=acm.CertificateValidation.from_dns(hosted_zone=hosted_zone),
    )
    self.hosted_zone = hosted_zone
    self.certificate = certificate
    self.route53_record = route53_record
def custom_fsx_task(self, host_port: int, family: str, file_system_id: str,
                    mad_secret_arn: str, mad_domain_name: str,
                    task_role: iam.Role, execution_role: iam.Role):
    """Register (and on stack deletion, deregister) an ECS task definition
    for a Windows IIS container backed by an FSx for Windows file share,
    via raw registerTaskDefinition/deregisterTaskDefinition SDK calls.

    Returns the new task definition's ARN (a token resolved at deploy time).

    NOTE(review): raw SDK calls are used here — presumably because the ECS
    L2 constructs in this CDK version do not model
    fsxWindowsFileServerVolumeConfiguration; confirm before refactoring.
    """
    on_create_aws_sdk_call = custom_resources.AwsSdkCall(
        # The freshly registered task definition ARN becomes the physical id.
        physical_resource_id=custom_resources.PhysicalResourceId.from_response(
            'taskDefinition.taskDefinitionArn'),
        service="ECS",
        action="registerTaskDefinition",  # https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/ECS.html (watch out for camel case!)
        parameters={
            "family": family,
            "taskRoleArn": task_role.role_arn,
            "executionRoleArn": execution_role.role_arn,
            "containerDefinitions": [{
                "name": "IISContainer",
                "image": "microsoft/iis",
                "cpu": 512,
                "memory": 1024,
                "links": [],
                "portMappings": [{
                    "containerPort": 80,
                    "hostPort": host_port,
                    "protocol": "tcp"
                }],
                "essential": True,
                "entryPoint": ["powershell", "-Command"],
                # Mount the FSx share into the container; volume name below
                # matches the file system id used in "volumes".
                "mountPoints": [
                    {
                        "sourceVolume": file_system_id,
                        "containerPath": 'C:\\fsx-windows-dir',
                        "readOnly": False
                    },
                ],
                # PowerShell one-liner: seed index.html on the share if absent,
                # append a timestamp + task-id row, copy it into IIS webroot,
                # then hand control to the IIS service monitor.
                "command": [
                    '$IndexFilePath = "C:\\fsx-windows-dir\\index.html"; if ((Test-Path -Path $IndexFilePath) -ne $true){New-Item -Path $IndexFilePath -ItemType file -Value "<html> <head> <title>Amazon ECS Sample App</title> <style>body {margin-top: 40px; background-color: #ff3;} </style> </head><body> <div style=color:black;text-align:center> <h1>Amazon ECS Sample App</h1> <h2>Congratulations!</h2> <p>Your application is now running on a container in Amazon ECS.</p> <table style=margin-left:auto;margin-right:auto;><tr><th>TimeStamp</th><th>Task ID</th></tr>" -Force;}; $datetime = Get-Date -Format "yyyy-MM-dd HH:mm:ss"; $TaskId = (Invoke-RestMethod -Method GET -Uri $env:ECS_CONTAINER_METADATA_URI_V4/task).TaskARN.split("/")[2]; Add-Content -Path $IndexFilePath -Value "<tr><th>$datetime</th><th>$TaskId</th></tr>"; Copy-Item -Path $IndexFilePath -Destination C:\\inetpub\\wwwroot\\index.html -Force; C:\\ServiceMonitor.exe w3svc;'
                ]
            }],
            # FSx for Windows volume, authenticated against Managed AD via a
            # Secrets Manager credentials parameter.
            "volumes": [
                {
                    'name': file_system_id,
                    'fsxWindowsFileServerVolumeConfiguration': {
                        'fileSystemId': file_system_id,
                        'rootDirectory': 'share',
                        'authorizationConfig': {
                            'credentialsParameter': mad_secret_arn,
                            'domain': mad_domain_name
                        }
                    }
                },
            ],
            "requiresCompatibilities": ['EC2']
        })
    on_delete_aws_sdk_call = custom_resources.AwsSdkCall(
        physical_resource_id=custom_resources.PhysicalResourceId.from_response(
            'taskDefinition.taskDefinitionArn'),
        service="ECS",
        action="deregisterTaskDefinition",  # https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/ECS.html (watch out for camel case!)
        parameters={
            # Deregister whatever revision this resource registered.
            "taskDefinition": custom_resources.PhysicalResourceIdReference(
            )  # https://docs.aws.amazon.com/cdk/api/latest/docs/custom-resources-readme.html#physical-resource-id-parameter
        })
    custom_task = custom_resources.AwsCustomResource(
        self,
        "FSXTaskResource",
        policy=custom_resources.AwsCustomResourcePolicy.from_statements(
            statements=[
                iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                    actions=["ecs:*"],
                                    resources=["*"]),
                # registerTaskDefinition needs to pass both roles to ECS.
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=["iam:PassRole"],
                    resources=[task_role.role_arn, execution_role.role_arn])
            ]),
        # Re-running the same call on update re-registers a new revision.
        on_create=on_create_aws_sdk_call,
        on_update=on_create_aws_sdk_call,
        on_delete=on_delete_aws_sdk_call)
    task_definition_arn = custom_task.get_response_field(
        'taskDefinition.taskDefinitionArn')
    return task_definition_arn
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Create a CodeBuild project that builds and pushes the colorteller
    Docker image to ECR, plus a custom resource that starts a build on
    every stack create/update.
    """
    super().__init__(scope, id, **kwargs)
    uri = self.account + '.dkr.ecr.' + self.region + '.amazonaws.com'
    appl = 'colorteller'
    # Inline buildspec: login to ECR, build, tag and push the image.
    buildspec = {
        'version': '0.2',
        'phases': {
            'install': {
                'commands': ['echo install step']
            },
            'pre_build': {
                'commands': [
                    'echo logging in to AWS ECR...',
                    '$(aws ecr get-login --no-include-email --region %s)' % self.region
                ]
            },
            'build': {
                'commands': [
                    'echo building Docker image...',
                    'cd appmeshdemo/colorapp/%s' % appl,
                    'docker build -t %s:latest .' % appl,
                    'docker tag %s:latest %s/%s:latest' % (appl, uri, appl)
                ]
            },
            'post_build': {
                'commands': [
                    'echo Docker image build complete!',
                    'echo push latest Docker images to ECR...',
                    'docker push %s/%s:latest' % (uri, appl)
                ]
            }
        }
    }
    # privileged=True is required for docker-in-docker builds.
    buildenviron = codebuild.BuildEnvironment(
        privileged=True,
        build_image=codebuild.LinuxBuildImage.UBUNTU_14_04_DOCKER_18_09_0,
        environment_variables={
            'AWS_DEFAULT_REGION':
            codebuild.BuildEnvironmentVariable(value=self.region),
            'AWS_ACCOUNT_ID':
            codebuild.BuildEnvironmentVariable(value=self.account),
            'IMAGE_REPO_NAME':
            codebuild.BuildEnvironmentVariable(value=appl),
            'IMAGE_TAG':
            codebuild.BuildEnvironmentVariable(value='latest')
        })
    proj = codebuild.Project(
        self,
        appl,
        build_spec=codebuild.BuildSpec.from_object(buildspec),
        environment=buildenviron)
    # FIX: AwsSdkCall expects a PhysicalResourceId object, not a raw string
    # (every other custom resource in this file already wraps the id).
    call = custom.AwsSdkCall(
        service='CodeBuild',
        action='startBuild',
        parameters={'projectName': proj.project_name},
        physical_resource_id=custom.PhysicalResourceId.of(
            'Custom%s' % proj.project_name))
    custom.AwsCustomResource(self, 'CustomCodeBuild', on_create=call, on_update=call)
def __init__(self, scope: cdk.Construct, id: str,
             cognito_user_pool: cognito.UserPool, s3_bucket_name: str,
             create_configuration_lambda_role_arn: str,
             redis: ec.CfnCacheCluster, domain_name: str, **kwargs) -> None:
    """Wire up ConsoleMe's OIDC client and configuration.

    Creates a Cognito user pool OAuth client, reads back its generated
    secret via an AwsCustomResource, stores the sensitive config as a
    Secrets Manager secret, and runs a Lambda-backed custom resource
    that writes the final configuration file to S3.
    """
    super().__init__(scope, id, **kwargs)
    # FIX: the original leaked the file handle via yaml.load(open(...)).
    with open("config.yaml") as config_file:
        config_yaml = yaml.load(config_file, Loader=yaml.FullLoader)
    spoke_accounts = config_yaml.get("spoke_accounts", [])
    cognito_user_pool_client = cognito.UserPoolClient(
        self,
        "UserPoolClient",
        user_pool=cognito_user_pool,
        generate_secret=True,
        supported_identity_providers=[
            cognito.UserPoolClientIdentityProvider.COGNITO
        ],
        prevent_user_existence_errors=True,
        o_auth=cognito.OAuthSettings(
            callback_urls=[
                "https://" + domain_name + "/auth",
                "https://" + domain_name + "/oauth2/idpresponse",
            ],
            logout_urls=["https://" + domain_name + "/logout"],
            flows=cognito.OAuthFlows(authorization_code_grant=True,
                                     implicit_code_grant=True),
            scopes=[cognito.OAuthScope.OPENID, cognito.OAuthScope.EMAIL],
        ),
        auth_flows=cognito.AuthFlow(user_password=True, user_srp=True),
    )
    # CloudFormation never exposes the generated client secret, so read it
    # back with a describeUserPoolClient SDK call.
    describe_cognito_user_pool_client = cr.AwsCustomResource(
        self,
        "UserPoolClientIDResource",
        policy=cr.AwsCustomResourcePolicy.from_sdk_calls(
            resources=cr.AwsCustomResourcePolicy.ANY_RESOURCE),
        on_create=cr.AwsSdkCall(
            service="CognitoIdentityServiceProvider",
            action="describeUserPoolClient",
            parameters={
                "UserPoolId": cognito_user_pool.user_pool_id,
                "ClientId": cognito_user_pool_client.user_pool_client_id,
            },
            physical_resource_id=cr.PhysicalResourceId.of(
                cognito_user_pool_client.user_pool_client_id),
        ),
        install_latest_aws_sdk=True,
        log_retention=logs.RetentionDays.ONE_WEEK,
    )
    cognito_user_pool_client_secret = (
        describe_cognito_user_pool_client.get_response_field(
            "UserPoolClient.ClientSecret"))
    imported_create_configuration_lambda_role = iam.Role.from_role_arn(
        self,
        "ImportedCreateConfigurationFileLambdaRole",
        role_arn=create_configuration_lambda_role_arn,
    )
    jwt_secret = config_yaml["jwt_secret"]
    # Sensitive values only; rendered to YAML and stored in Secrets Manager.
    config_secret_dict = {
        "oidc_secrets": {
            "client_id": cognito_user_pool_client.user_pool_client_id,
            "secret": cognito_user_pool_client_secret,
            "client_scope": ["email", "openid"],
        },
        "jwt_secret": jwt_secret,
    }
    config_secret_yaml = yaml.dump(
        config_secret_dict,
        explicit_start=True,
        default_flow_style=False,
    )
    # Secret lifecycle is managed through raw SDK calls so that update
    # rewrites the value in place and delete skips the recovery window.
    config_secret = cr.AwsCustomResource(
        self,
        "ConfigSecretResource",
        policy=cr.AwsCustomResourcePolicy.from_sdk_calls(
            resources=cr.AwsCustomResourcePolicy.ANY_RESOURCE),
        on_update=cr.AwsSdkCall(
            service="SecretsManager",
            action="updateSecret",
            parameters={
                "SecretId": CONFIG_SECRET_NAME,
                "SecretString": config_secret_yaml,
            },
            physical_resource_id=cr.PhysicalResourceId.from_response("Name"),
        ),
        on_create=cr.AwsSdkCall(
            service="SecretsManager",
            action="createSecret",
            parameters={
                "Name": CONFIG_SECRET_NAME,
                "Description": "Sensitive configuration parameters for ConsoleMe",
                "SecretString": config_secret_yaml,
            },
            physical_resource_id=cr.PhysicalResourceId.from_response("Name"),
        ),
        on_delete=cr.AwsSdkCall(
            service="SecretsManager",
            action="deleteSecret",
            parameters={
                "SecretId": CONFIG_SECRET_NAME,
                "ForceDeleteWithoutRecovery": True,
            },
        ),
        install_latest_aws_sdk=True,
        log_retention=logs.RetentionDays.ONE_WEEK,
    )
    # Lambda that renders and uploads the full configuration file.
    create_configuration_lambda = lambda_.Function(
        self,
        "CreateConfigurationFileLambda",
        code=lambda_.Code.from_asset("resources/create_config_lambda"),
        handler="index.handler",
        timeout=cdk.Duration.seconds(30),
        layers=[create_dependencies_layer(self, "create_config_lambda")],
        runtime=lambda_.Runtime.PYTHON_3_8,
        role=imported_create_configuration_lambda_role,
        environment={
            "DEPLOYMENT_BUCKET": s3_bucket_name,
            "OIDC_METADATA_URL": "https://cognito-idp." + self.region +
            ".amazonaws.com/" + cognito_user_pool.user_pool_id +
            "/.well-known/openid-configuration",
            "REDIS_HOST": redis.attr_redis_endpoint_address,
            "SES_IDENTITY_ARN": "arn:aws:ses:" + self.region + ":" +
            self.account + ":identity/" + domain_name,
            "SUPPORT_CHAT_URL": "https://discord.gg/nQVpNGGkYu",
            "APPLICATION_ADMIN": "consoleme_admin",
            "ACCOUNT_NUMBER": self.account,
            "ISSUER": domain_name,
            "SPOKE_ACCOUNTS": ",".join(spoke_accounts),
            "CONFIG_SECRET_NAME": CONFIG_SECRET_NAME,
        },
    )
    create_configuration_resource_provider = cr.Provider(
        self,
        "CreateConfigurationFileProvider",
        on_event_handler=create_configuration_lambda,
        log_retention=logs.RetentionDays.ONE_WEEK,
    )
    # Fresh UUID property forces the Lambda to re-run on every deployment.
    create_configuration_lambda_resource = cdk.CustomResource(
        self,
        "CreateConfigurationFile",
        service_token=create_configuration_resource_provider.service_token,
        removal_policy=cdk.RemovalPolicy.DESTROY,
        properties={"UUID": str(uuid4())},
    )
    # The config file references the secret, so the secret must exist first.
    create_configuration_lambda_resource.node.add_dependency(config_secret)
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    """Test stack: an AwsCustomResource that lists S3 buckets, plus a VPC
    and an Elasticsearch domain (with full logging) inside it."""
    super().__init__(scope, construct_id, **kwargs)
    # Execution role for the custom resource's backing Lambda.
    role = iam.Role(
        scope=self,
        id='AwsCustomResourceRole',
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'))
    role.add_to_policy(
        iam.PolicyStatement(actions=['iam:PassRole'], resources=['*']))
    my_custom_resource = cr.AwsCustomResource(
        scope=self,
        id='MyAwsCustomResource',
        role=role,
        policy=cr.AwsCustomResourcePolicy.from_sdk_calls(resources=['*']),
        on_create=cr.AwsSdkCall(
            action='listBuckets',
            service='s3',
            physical_resource_id=cr.PhysicalResourceId.of('BucketsList'),
        ))
    vpc = VPCConstruct(self, id_='test-vpc', num_of_azs=2)
    security_group = SecurityGroup(
        self,
        id='test-security-group',
        vpc=vpc,
        security_group_name='test-security-group')
    # Allow HTTPS in from the Lambda security group of the VPC construct.
    security_group.add_ingress_rule(connection=Port.tcp(443),
                                    peer=vpc.lambdas_sg)
    domain = es.Domain(
        scope=self,
        id='Domain',
        version=es.ElasticsearchVersion.V7_9,
        domain_name="es-domain-name",
        enable_version_upgrade=False,
        enforce_https=True,
        fine_grained_access_control=None,
        node_to_node_encryption=True,
        # NOTE(review): TLS_1_0 permits weak TLS versions — consider
        # TLS_1_2 unless legacy clients require this; confirm with owners.
        tls_security_policy=es.TLSSecurityPolicy.TLS_1_0,
        # App, slow-index and slow-search logs each get their own log group,
        # destroyed with the stack.
        logging=es.LoggingOptions(
            app_log_enabled=True,
            slow_index_log_enabled=True,
            slow_search_log_enabled=True,
            app_log_group=LogGroup(
                scope=self,
                id="app-log-group",
                log_group_name=f'/aws/aes/domains/esdomain/app-log-group',
                removal_policy=core.RemovalPolicy.DESTROY),
            slow_index_log_group=LogGroup(
                scope=self,
                id="slow-index-log-group",
                log_group_name=f'/aws/aes/domains/esdomain/slow-index-log-group',
                removal_policy=core.RemovalPolicy.DESTROY),
            slow_search_log_group=LogGroup(
                scope=self,
                id="slow-search-log-group",
                log_group_name=f'/aws/aes/domains/esdomain/slow-search-log-group',
                removal_policy=core.RemovalPolicy.DESTROY)),
        removal_policy=core.RemovalPolicy.DESTROY,
        # Two AZs to match num_of_azs above.
        zone_awareness=es.ZoneAwarenessConfig(availability_zone_count=2,
                                              enabled=True),
        vpc_options=es.VpcOptions(
            security_groups=[security_group],
            subnets=vpc.audit_vpc.select_subnets(
                subnet_group_name=PRIVATE_SUBNET_GROUP).subnets))
def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
    """AWS Cookbook recipe 404 stack: isolated-VPC MySQL RDS instance plus
    a Lambda (invoked once at deploy time via an AwsCustomResource) that
    loads content from an S3 bucket into the database."""
    super().__init__(scope, construct_id, **kwargs)
    # create s3 bucket
    s3_Bucket = s3.Bucket(self, "AWS-Cookbook-Recipe-404",
                          removal_policy=RemovalPolicy.DESTROY,
                          auto_delete_objects=True)
    # Upload local ./s3_content into the bucket at deploy time.
    aws_s3_deployment.BucketDeployment(
        self, 'S3Deployment',
        destination_bucket=s3_Bucket,
        sources=[aws_s3_deployment.Source.asset("./s3_content")],
        retain_on_delete=False)
    isolated_subnets = ec2.SubnetConfiguration(
        name="ISOLATED",
        subnet_type=ec2.SubnetType.PRIVATE_ISOLATED,
        cidr_mask=24)
    # create VPC (isolated subnets only — access to AWS services goes
    # through the VPC endpoints below).
    vpc = ec2.Vpc(self, 'AWS-Cookbook-VPC',
                  cidr='10.10.0.0/23',
                  subnet_configuration=[isolated_subnets])
    vpc.add_interface_endpoint(
        'VPCSecretsManagerInterfaceEndpoint',
        service=ec2.InterfaceVpcEndpointAwsService(
            'secretsmanager'
        ),  # Find names with - aws ec2 describe-vpc-endpoint-services | jq '.ServiceNames'
        private_dns_enabled=True,
        subnets=ec2.SubnetSelection(
            one_per_az=False,
            subnet_type=ec2.SubnetType.PRIVATE_ISOLATED),
    )
    vpc.add_gateway_endpoint(
        's3GateWayEndPoint',
        service=ec2.GatewayVpcEndpointAwsService('s3'),
        subnets=[
            ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PRIVATE_ISOLATED)
        ],
    )
    subnet_group = rds.SubnetGroup(
        self,
        'rds_subnet_group',
        description='VPC Subnet Group for RDS',
        vpc=vpc,
        vpc_subnets=ec2.SubnetSelection(
            one_per_az=False,
            subnet_type=ec2.SubnetType.PRIVATE_ISOLATED))
    rds_instance = rds.DatabaseInstance(
        self,
        'DBInstance',
        engine=rds.DatabaseInstanceEngine.mysql(
            version=rds.MysqlEngineVersion.VER_8_0_23),
        instance_type=ec2.InstanceType("m5.large"),
        vpc=vpc,
        multi_az=False,
        database_name='AWSCookbookRecipe404',
        instance_identifier='awscookbook404db-orig',
        delete_automated_backups=True,
        deletion_protection=False,
        # iam_authentication=
        removal_policy=RemovalPolicy.DESTROY,
        allocated_storage=8,
        subnet_group=subnet_group)
    # Lambda layers are expected to be pre-built locally:
    # mkdir -p lambda-layers/sqlparse/python
    # cd layers/sqlparse/python
    # pip install sqlparse --target="."
    # cd ../../../
    # create Lambda Layer
    sqlparse = aws_lambda.LayerVersion(
        self,
        "sqlparse",
        code=aws_lambda.AssetCode('lambda-layers/sqlparse'),
        compatible_runtimes=[aws_lambda.Runtime.PYTHON_3_8],
        description="sqlparse",
        license="https://github.com/andialbrecht/sqlparse/blob/master/LICENSE")
    pymysql = aws_lambda.LayerVersion(
        self,
        "pymysql",
        code=aws_lambda.AssetCode('lambda-layers/pymysql'),
        compatible_runtimes=[aws_lambda.Runtime.PYTHON_3_8],
        description="pymysql",
        license="MIT")
    smartopen = aws_lambda.LayerVersion(
        self,
        "smartopen",
        code=aws_lambda.AssetCode('lambda-layers/smart_open'),
        compatible_runtimes=[aws_lambda.Runtime.PYTHON_3_8],
        description="smartopen",
        license="MIT")
    # The loader Lambda runs inside the VPC so it can reach RDS directly.
    lambda_function = aws_lambda.Function(
        self,
        'LambdaRDS',
        code=aws_lambda.AssetCode("./mysql-lambda/"),
        handler="lambda_function.lambda_handler",
        environment={
            "DB_SECRET_ARN": rds_instance.secret.secret_arn,
            "S3_BUCKET": s3_Bucket.bucket_name
        },
        layers=[sqlparse, pymysql, smartopen],
        memory_size=1024,
        runtime=aws_lambda.Runtime.PYTHON_3_8,
        timeout=Duration.seconds(600),
        vpc=vpc,
        vpc_subnets=ec2.SubnetSelection(
            subnet_type=ec2.SubnetType.PRIVATE_ISOLATED))
    rds_instance.secret.grant_read(lambda_function)
    rds_instance.connections.allow_from(lambda_function.connections,
                                        ec2.Port.tcp(3306), "Ingress")
    s3_Bucket.grant_read(lambda_function)
    # Custom resource that invokes the loader Lambda once, on stack create.
    create_params = {
        "FunctionName": lambda_function.function_arn,
    }
    on_create = custom_resources.AwsSdkCall(
        action='invoke',
        service='Lambda',
        parameters=create_params,
        physical_resource_id=custom_resources.PhysicalResourceId.of(
            'LambdaRDS'))
    policy_statement = iam.PolicyStatement(
        actions=["lambda:InvokeFunction"],
        effect=iam.Effect.ALLOW,
        resources=[lambda_function.function_arn],
    )
    policy = custom_resources.AwsCustomResourcePolicy.from_statements(
        statements=[policy_statement])
    custom_resources.AwsCustomResource(
        self,
        'CustomResource',
        policy=policy,
        on_create=on_create,
        log_retention=logs.RetentionDays.TWO_WEEKS)
    # outputs
    CfnOutput(self, 'RdsSubnetGroup', value=subnet_group.subnet_group_name)
    CfnOutput(self, 'RdsDatabaseId', value=rds_instance.instance_identifier)
def __init__(self, scope: cdk.Construct, id: str, domain_name: str, **kwargs) -> None:
    """Cognito identity setup: user pool + hosted domain, an initial admin
    user, admin/user groups, and admin group membership — the user, groups
    and membership are created via CognitoIdentityServiceProvider SDK calls
    since CloudFormation did not cover them here.

    Exposes the pool as ``self.cognito_user_pool``.
    """
    super().__init__(scope, id, **kwargs)
    # User pool and user pool OAuth client
    cognito_user_pool = cognito.UserPool(
        self, "UserPool", removal_policy=cdk.RemovalPolicy.DESTROY)
    # Hosted UI domain: <prefix>-<suffix>.auth.<region>.amazoncognito.com
    cognito.UserPoolDomain(
        self,
        "UserPoolDomain",
        cognito_domain=cognito.CognitoDomainOptions(
            domain_prefix=APPLICATION_PREFIX + "-" + APPLICATION_SUFFIX),
        user_pool=cognito_user_pool,
    )
    # Seed admin user with a temporary password (module-level constant).
    cognito_admin_user = cr.AwsCustomResource(
        self,
        "UserPoolAdminUserResource",
        policy=cr.AwsCustomResourcePolicy.from_sdk_calls(
            resources=cr.AwsCustomResourcePolicy.ANY_RESOURCE),
        on_create=cr.AwsSdkCall(
            service="CognitoIdentityServiceProvider",
            action="adminCreateUser",
            parameters={
                "UserPoolId": cognito_user_pool.user_pool_id,
                "Username": "******",
                "UserAttributes": [{
                    "Name": "email",
                    "Value": "consoleme_admin@" + domain_name
                }],
                "TemporaryPassword": ADMIN_TEMP_PASSWORD,
            },
            physical_resource_id=cr.PhysicalResourceId.of(
                cognito_user_pool.user_pool_id),
        ),
    )
    cognito_admin_group = cr.AwsCustomResource(
        self,
        "UserPoolAdminGroupResource",
        policy=cr.AwsCustomResourcePolicy.from_sdk_calls(
            resources=cr.AwsCustomResourcePolicy.ANY_RESOURCE),
        on_create=cr.AwsSdkCall(
            service="CognitoIdentityServiceProvider",
            action="createGroup",
            parameters={
                "UserPoolId": cognito_user_pool.user_pool_id,
                "GroupName": "consoleme_admins",
            },
            physical_resource_id=cr.PhysicalResourceId.of(
                id="UserPoolAdminGroupResource"),
        ),
    )
    # Non-admin users group (resource handle not needed afterwards).
    cr.AwsCustomResource(
        self,
        "UserPoolUserGroupResource",
        policy=cr.AwsCustomResourcePolicy.from_sdk_calls(
            resources=cr.AwsCustomResourcePolicy.ANY_RESOURCE),
        on_create=cr.AwsSdkCall(
            service="CognitoIdentityServiceProvider",
            action="createGroup",
            parameters={
                "UserPoolId": cognito_user_pool.user_pool_id,
                "GroupName": "consoleme_users",
            },
            physical_resource_id=cr.PhysicalResourceId.of(
                id="UserPoolUserGroupResource"),
        ),
    )
    cognito_assign_admin_group = cr.AwsCustomResource(
        self,
        "UserPoolAssignAdminGroupResource",
        policy=cr.AwsCustomResourcePolicy.from_sdk_calls(
            resources=cr.AwsCustomResourcePolicy.ANY_RESOURCE),
        on_create=cr.AwsSdkCall(
            service="CognitoIdentityServiceProvider",
            action="adminAddUserToGroup",
            parameters={
                "UserPoolId": cognito_user_pool.user_pool_id,
                "GroupName": "consoleme_admins",
                "Username": "******",
            },
            physical_resource_id=cr.PhysicalResourceId.of(
                id="UserPoolAssignAdminGroupResource"),
        ),
    )
    # Membership can only be created after both the user and group exist.
    cognito_assign_admin_group.node.add_dependency(cognito_admin_user)
    cognito_assign_admin_group.node.add_dependency(cognito_admin_group)
    self.cognito_user_pool = cognito_user_pool
def __init__(self, scope: core.Construct, id: str, cluster: eks.Cluster,
             kafka: msk.CfnCluster, vpc: ec2.Vpc, **kwargs) -> None:
    """Discover the MSK cluster's TLS bootstrap brokers, publish them to
    Kubernetes as ConfigMaps, and create the "load"/"generate" Kafka topics
    through a Lambda-backed custom resource provider."""
    super().__init__(scope, id, **kwargs)
    # HACK: vendors kafka-python into the custom-resource Lambda asset at
    # synth time. Makes synth non-hermetic; prebuilt assets or a Lambda
    # layer would be the cleaner approach.
    pip.main([
        "install", "--system", "--target", "custom_resources/kafka/lib",
        "kafka-python"
    ])
    # MSK L1 does not expose the cluster ARN, so look it up by name.
    arn = cr.AwsCustomResource(
        self,
        'clusterArn',
        policy=cr.AwsCustomResourcePolicy.from_sdk_calls(resources=['*']),
        on_create=cr.AwsSdkCall(
            action='listClusters',
            service='Kafka',
            physical_resource_id=cr.PhysicalResourceId.of(
                "ClusterNameFilter"),
            parameters={
                "ClusterNameFilter": kafka.cluster_name,
                "MaxResults": 1
            },
        ),
    )
    # Resolve the TLS bootstrap broker string from the ARN found above.
    bootstraps = cr.AwsCustomResource(
        self,
        'clusterBootstraps',
        policy=cr.AwsCustomResourcePolicy.from_sdk_calls(resources=["*"]),
        on_create=cr.AwsSdkCall(
            action='getBootstrapBrokers',
            service='Kafka',
            physical_resource_id=cr.PhysicalResourceId.of("ClusterArn"),
            parameters={
                "ClusterArn":
                arn.get_response_field("ClusterInfoList.0.ClusterArn")
            },
        ),
    )
    # One "kafka" ConfigMap with the bootstrap string per configured namespace.
    manifests = []
    for namespace in self.node.try_get_context("kubernetes")['namespaces']:
        manifests.append({
            "apiVersion": "v1",
            "kind": "ConfigMap",
            "metadata": {
                "name": "kafka",
                "namespace": namespace
            },
            "data": {
                "bootstrap":
                bootstraps.get_response_field('BootstrapBrokerStringTls'),
            }
        })
    eks.KubernetesManifest(self, "kafka-config", cluster=cluster,
                           manifest=manifests)
    # Topic-creating Lambda runs inside the VPC so it can reach the brokers.
    function = lbd.SingletonFunction(
        self,
        "KafkaConfigFunction",
        uuid="b09329a3-5206-46f7-822f-337da714aeac",
        code=lbd.Code.from_asset("custom_resources/kafka/"),
        handler="config.handler",
        runtime=lbd.Runtime.PYTHON_3_7,
        function_name="kafkaConfig",
        log_retention=logs.RetentionDays.ONE_DAY,
        security_group=ec2.SecurityGroup.from_security_group_id(
            self, "lambdaKafkaVPC", vpc.vpc_default_security_group),
        timeout=core.Duration.seconds(30),
        vpc=vpc,
        vpc_subnets=ec2.SubnetSelection(one_per_az=True))
    provider = cr.Provider(self, "KafkaConfigProvider",
                           on_event_handler=function,
                           log_retention=logs.RetentionDays.ONE_DAY)
    # Two topics with different partition counts; replicas=1 — presumably a
    # single-broker / test setup, confirm before production use.
    core.CustomResource(
        self,
        "KafkaLoadTopic",
        service_token=provider.service_token,
        properties={
            "bootstrap":
            bootstraps.get_response_field('BootstrapBrokerStringTls'),
            "topic": "load",
            "partitions": 150,
            "replicas": 1
        })
    core.CustomResource(
        self,
        "KafkaGenerateTopic",
        service_token=provider.service_token,
        properties={
            "bootstrap":
            bootstraps.get_response_field('BootstrapBrokerStringTls'),
            "topic": "generate",
            "partitions": 200,
            "replicas": 1
        })
def __init__(self, scope: core.Construct, id: str, source_bucket_name: str,
             glue_database_name: str, **kwargs) -> None:
    """FITS data lake stack.

    Wires an existing source S3 bucket to a header-extractor Lambda via a
    custom-resource bucket notification, stores extracted headers in a new
    target bucket, and catalogs them with a scheduled Glue crawler (plus
    the Lake Formation permissions the crawler needs).
    """
    super().__init__(scope, id, **kwargs)
    # get the source bucket - this object is an IBucketProxy interface, not a Bucket construct.
    # Can not be used to add an event directly. Instead, use a custom resource to add an event trigger later
    source_bucket = s3.Bucket.from_bucket_name(
        self, "MySourceBucket", bucket_name=source_bucket_name)
    # create the new destination bucket - this bucket holds the csv file that contains the FITS header information
    # the name of the bucket will be <stack-id>-fitsstorebucketXXXXXXXX-YYYYYYYYYYYYY
    # e.g. my-fits-datalake-fitsstorebucket1234567f-098765432d
    target_bucket = s3.Bucket(self, "FITSSTORE_BUCKET")
    # Add the astropy and numpy layers for the lambda function that is used as the event trigger on the source_bucket
    layer_astropy = lambda_.LayerVersion(
        self,
        'AstroFitsioLayer',
        code=lambda_.Code.from_asset("resources_layer/astropy.zip"),
        compatible_runtimes=[lambda_.Runtime.PYTHON_3_7])
    # use an AWS provided layer for numpy
    layer_numpy = lambda_.LayerVersion.from_layer_version_arn(
        self, "NumpyLayer",
        "arn:aws:lambda:us-east-1:668099181075:layer:AWSLambda-Python37-SciPy1x:22"
    )
    # create the FITS header extractor lambda function
    # pass the FITSSTORE_BUCKET to the lambda function as an environment variable
    handler = lambda_.Function(
        self,
        "FITSHeaderExtractorHandler",
        runtime=lambda_.Runtime.PYTHON_3_7,
        code=lambda_.Code.asset("resources"),
        handler="fits_header_extractor.fits_header_extractor_handler",
        environment=dict(FITSSTORE_BUCKET=target_bucket.bucket_name),
        layers=[layer_astropy, layer_numpy])
    # grant read access to handler on source bucket
    source_bucket.grant_read(handler)
    # Give the lambda resource based policy
    # both source_arn and source_account is needed for security reason
    handler.add_permission(
        's3-trigger-lambda-s3-invoke-function',
        principal=iam_.ServicePrincipal('s3.amazonaws.com'),
        action='lambda:InvokeFunction',
        source_arn=source_bucket.bucket_arn,
        source_account=self.account)
    # grant access to the handler
    # - this is a lot easier than adding policies, but not all constructs support this
    target_bucket.grant_read_write(handler)
    # map the put event to handler - this doesn't work as source_bucket is not really a Bucket object (IBucketProxy)
    # You can use this approach if the bucket is created as a new Bucket object
    #notification = s3_notifications.LambdaDestination(handler)
    #source_bucket.add_object_created_notification(self, notification )
    # use custom resource to add an event trigger on the destination bucket -
    # the custom resource creation makes an SDK call to create the event notification on the
    # Action reference https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html
    # Events reference https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
    custom_s3_resource = custom_resources_.AwsCustomResource(
        self,
        's3-putobject-custom-notification-resource',
        policy=custom_resources_.AwsCustomResourcePolicy.from_statements([
            iam_.PolicyStatement(effect=iam_.Effect.ALLOW,
                                 resources=['*'],
                                 actions=['s3:PutBucketNotification'])
        ]),
        on_create=custom_resources_.AwsSdkCall(
            service="S3",
            action="putBucketNotificationConfiguration",
            parameters={
                "Bucket": source_bucket.bucket_name,
                "NotificationConfiguration": {
                    "LambdaFunctionConfigurations": [{
                        # Fire on both object creation and removal of *.fits keys.
                        "Events":
                        ['s3:ObjectCreated:*', 's3:ObjectRemoved:*'],
                        "LambdaFunctionArn": handler.function_arn,
                        "Filter": {
                            "Key": {
                                "FilterRules": [{
                                    'Name': 'suffix',
                                    'Value': 'fits'
                                }]
                            }
                        }
                    }]
                }
            },
            physical_resource_id=custom_resources_.PhysicalResourceId.of(
                f's3-notification-resource-{str(uuid.uuid1())}'),
            region=self.region))
    # Make sure the lambda function is created first
    custom_s3_resource.node.add_dependency(
        handler.permissions_node.find_child(
            's3-trigger-lambda-s3-invoke-function'))
    # create a glue crawler to build the data catalog
    # Step 1. create a role for AWS Glue
    glue_role = iam_.Role(
        self,
        "glue_role",
        assumed_by=iam_.ServicePrincipal('glue.amazonaws.com'),
        managed_policies=[
            iam_.ManagedPolicy.from_managed_policy_arn(
                self, 'MyFitsCrawlerGlueRole',
                managed_policy_arn=
                'arn:aws:iam::aws:policy/service-role/AWSGlueServiceRole')
        ])
    # glue role needs "*" read/write - otherwise crawler will not be able to create tables (and no error messages in crawler logs)
    glue_role.add_to_policy(
        iam_.PolicyStatement(actions=[
            's3:GetObject', 's3:PutObject', 'lakeformation:GetDataAccess'
        ],
                             effect=iam_.Effect.ALLOW,
                             resources=['*']))
    # Step 2. create a database in data catalog
    db = glue_.Database(self, "MyFitsDatabase",
                        database_name=glue_database_name)
    # Step 3. create a crawler named "fitsdatalakecrawler-<hex>", and schedule to run every 15 mins
    # You can change the frequency based on your needs
    # cron schedule format cron(Minutes Hours Day-of-month Month Day-of-week Year)
    glue_.CfnCrawler(
        self,
        "fits-datalake-crawler",
        database_name=glue_database_name,
        role=glue_role.role_arn,
        schedule={"scheduleExpression": "cron(0/15 * * * ? *)"},
        targets={"s3Targets": [{
            "path": target_bucket.bucket_name
        }]},
    )
    # When your AWS Lake Formation Data catalog settings is not set to
    # "Use only IAM access control for new databases" or
    # "Use only IAM access control for new tables in new database"
    # you need to grant additional permission to the data catalog database.
    # in order for the crawler to run, we need to add some permissions to lakeformation
    location_resource = lakeformation_.CfnResource(
        self,
        "MyFitsDatalakeLocationResource",
        resource_arn=target_bucket.bucket_arn,
        use_service_linked_role=True)
    lakeformation_.CfnPermissions(
        self,
        "MyFitsDatalakeDatabasePermission",
        data_lake_principal=lakeformation_.CfnPermissions.
        DataLakePrincipalProperty(
            data_lake_principal_identifier=glue_role.role_arn),
        resource=lakeformation_.CfnPermissions.ResourceProperty(
            database_resource=lakeformation_.CfnPermissions.
            DatabaseResourceProperty(name=db.database_name)),
        permissions=["ALTER", "DROP", "CREATE_TABLE"],
    )
    location_permission = lakeformation_.CfnPermissions(
        self,
        "MyFitsDatalakeLocationPermission",
        data_lake_principal=lakeformation_.CfnPermissions.
        DataLakePrincipalProperty(
            data_lake_principal_identifier=glue_role.role_arn),
        resource=lakeformation_.CfnPermissions.ResourceProperty(
            data_location_resource=lakeformation_.CfnPermissions.
            DataLocationResourceProperty(
                s3_resource=target_bucket.bucket_arn)),
        permissions=["DATA_LOCATION_ACCESS"],
    )
    # make sure the location resource is created first
    location_permission.node.add_dependency(location_resource)
def __init__(
    self,
    scope: core.Construct,
    id: str,
    service_control_policy_string: str,
    description: str,
    name: str,
) -> None:
    """Manage an AWS Organizations service control policy as a custom resource.

    createPolicy/updatePolicy/deletePolicy SDK calls back the resource;
    the created policy id is exposed as ``self.policy_id``.
    """
    super().__init__(scope, id)
    POLICY_ID_LOOKUP = "Policy.PolicySummary.Id"
    # https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/Organizations.html
    # https://docs.aws.amazon.com/cdk/api/latest/docs/custom-resources-readme.html#physical-resource-id-parameter
    # Fields shared by the create and update calls.
    shared_fields = {
        "Content": service_control_policy_string,
        "Description": description,
        "Name": name,
    }
    on_create_policy = cr.AwsSdkCall(
        action="createPolicy",
        service="Organizations",
        physical_resource_id=cr.PhysicalResourceId.from_response(POLICY_ID_LOOKUP),
        parameters={**shared_fields, "Type": "SERVICE_CONTROL_POLICY"},
        output_path=POLICY_ID_LOOKUP,
    )
    on_update_policy = cr.AwsSdkCall(
        action="updatePolicy",
        service="Organizations",
        physical_resource_id=cr.PhysicalResourceId.from_response(POLICY_ID_LOOKUP),
        parameters={**shared_fields, "PolicyId": cr.PhysicalResourceIdReference()},
        output_path=POLICY_ID_LOOKUP,
    )
    # Delete only needs the id of the policy this resource created.
    on_delete_policy = cr.AwsSdkCall(
        action="deletePolicy",
        service="Organizations",
        parameters={"PolicyId": cr.PhysicalResourceIdReference()},
    )
    scp_create = cr.AwsCustomResource(
        self,
        "ServiceControlPolicyCreate",
        install_latest_aws_sdk=True,
        policy=cr.AwsCustomResourcePolicy.from_sdk_calls(
            resources=cr.AwsCustomResourcePolicy.ANY_RESOURCE
        ),
        on_create=on_create_policy,
        on_update=on_update_policy,
        on_delete=on_delete_policy,
        resource_type="Custom::ServiceControlPolicy",
    )
    self.policy_id = scp_create.get_response_field(POLICY_ID_LOOKUP)