Example 1
 def __init__(self, scope: core.Construct, id: str, domain: es.CfnDomain,
              **kwargs) -> None:
     super().__init__(scope, id, **kwargs)
     arn = sdk.AwsCustomResource(
         self,
         'esConfig',
         policy=sdk.AwsCustomResourcePolicy.from_sdk_calls(
             resources=[domain.domain_arn]),
         on_create=sdk.AwsSdkCall(
             action='updateElasticsearchDomainConfig',
             service='ES',
             physical_resource_id=sdk.PhysicalResourceId.of(
                 "updateElasticsearchDomainConfig"),
             output_path="DomainConfig.ElasticsearchClusterConfig",
             parameters={
                 "DomainName": domain.domain_name,
                 "ElasticsearchClusterConfig": {
                     "WarmCount":
                     self.node.try_get_context("elastic")["warm"]["count"],
                     "WarmEnabled":
                     True,
                     "WarmType":
                     self.node.try_get_context("elastic")["warm"]["type"]
                 }
             },
         ),
     )
     arn.node.add_dependency(domain)
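
A hedged usage sketch (not part of the original): since the call stores the filtered response under `output_path`, a caller could surface the applied warm count, assuming the response shape `DomainConfig.ElasticsearchClusterConfig.Options.WarmCount`:

     warm_count = arn.get_response_field(
         "DomainConfig.ElasticsearchClusterConfig.Options.WarmCount")
     core.CfnOutput(self, "WarmCount", value=warm_count)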
Example 2
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 eks_name: str,
                 eks_arn: str,
                 log_retention=None) -> None:
        super().__init__(scope, id)

        on_create = self.get_on_create_update(eks_name=eks_name)

        lambda_role = iam.Role(
            self,
            "LambdaRole",
            assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AWSLambdaBasicExecutionRole")
            ],
        )

        lambda_policy = custom_resources.AwsCustomResourcePolicy.from_statements(
            [
                iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                    actions=["eks:UpdateClusterConfig"],
                                    resources=["*"])
            ])

        custom_resources.AwsCustomResource(
            scope=scope,
            id=f'{id}-AWSCustomResource',
            log_retention=log_retention,
            on_create=on_create,
            resource_type='Custom::AWS-EKS-Logs-Object',
            role=lambda_role,
            policy=lambda_policy)
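
The helper `get_on_create_update` is referenced above but not shown; a minimal sketch, assuming it enables all EKS control-plane log types (the exact parameters are an assumption):

    def get_on_create_update(self, eks_name: str) -> custom_resources.AwsSdkCall:
        # Build the SDK call that turns on every control-plane log type.
        return custom_resources.AwsSdkCall(
            service='EKS',
            action='updateClusterConfig',
            parameters={
                'name': eks_name,
                'logging': {
                    'clusterLogging': [{
                        'types': ['api', 'audit', 'authenticator',
                                  'controllerManager', 'scheduler'],
                        'enabled': True,
                    }],
                },
            },
            physical_resource_id=custom_resources.PhysicalResourceId.of(
                f'{eks_name}-control-plane-logs'),
        )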
Example 3
 def create_userpool_client_secret(
     self,
     user_pool: aws_cognito.CfnUserPool,
     user_pool_client: aws_cognito.CfnUserPoolClient,
     tag: str,
 ) -> custom_resources.AwsCustomResource:
     """
     :return: an AwsCustomResource that provides access to the user pool client secret in the
         response field `user_pool_client_secret`
     """
     resource = custom_resources.AwsCustomResource(
         self,
         f"userpool_client_secret_{tag}",
         resource_type="Custom::UserPoolClientSecret",
         policy=custom_resources.AwsCustomResourcePolicy.from_statements([
             aws_iam.PolicyStatement(
                 effect=aws_iam.Effect.ALLOW,
                 actions=["cognito-idp:DescribeUserPoolClient"],
                 resources=[
                     f"arn:aws:cognito-idp:{self.region}:{self.account}:userpool/{user_pool.ref}"  # noqa: E501
                 ],
             )
         ]),
         on_create=custom_resources.AwsSdkCall(
             physical_resource_id=custom_resources.PhysicalResourceId.of(
                 user_pool_client.ref),
             service="CognitoIdentityServiceProvider",
             action="describeUserPoolClient",
             output_path="UserPoolClient.ClientSecret",
             parameters={
                 "ClientId": user_pool_client.ref,
                 "UserPoolId": user_pool.ref
             },
         ),
         on_update=custom_resources.AwsSdkCall(
             physical_resource_id=custom_resources.PhysicalResourceId.of(
                 user_pool_client.ref),
             service="CognitoIdentityServiceProvider",
             action="describeUserPoolClient",
             output_path="UserPoolClient.ClientSecret",
             parameters={
                 "ClientId": user_pool_client.ref,
                 "UserPoolId": user_pool.ref
             },
         ),
     )
     return resource
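
A hedged usage sketch: a caller reads the secret out of the filtered response (the tag value "web" is an assumption):

 client_secret = self.create_userpool_client_secret(
     user_pool, user_pool_client, "web").get_response_field(
         "UserPoolClient.ClientSecret")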
Example 4
    def __init__(self, scope: core.Construct, id: str,
                 props: ParameterReaderProps, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        self.resource = cr.AwsCustomResource(
            self,
            'get_parameters',
            policy=cr.AwsCustomResourcePolicy.from_sdk_calls(
                resources=cr.AwsCustomResourcePolicy.ANY_RESOURCE),
            on_create=cr.AwsSdkCall(
                service='SSM',
                action='getParameter',
                parameters={
                    'Name': props.parameterName,
                    'WithDecryption': props.with_decryption,
                },
                region=props.region,
                physical_resource_id=cr.PhysicalResourceId.of(
                    id=str(datetime.now()))))
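
`ParameterReaderProps` is referenced above but not defined; a minimal sketch, plus how a caller might read the resolved value (class layout and the `reader` name are assumptions):

from dataclasses import dataclass

@dataclass
class ParameterReaderProps:
    parameterName: str
    with_decryption: bool
    region: str

# reader = ParameterReader(self, 'Reader', props)
# value = reader.resource.get_response_field('Parameter.Value')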
Example 5
    def __init__(
            self,
            scope: core.Construct,
            id: str,
            iam_policy: cr.AwsCustomResourcePolicy,
            quicksight_group_arn: str,
            redshift_datasource_arn: str,
            redshift_dataset_name: str,
            dataset_actions: list,
            redshift_custom_sql: str,
            redshift_columns: list,
            redshift_data_transformations: list,
            **kwargs):

        super().__init__(scope, id, **kwargs)

        aws_account_id = core.Aws.ACCOUNT_ID
        uniquestring = datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S')
        dataset_id = redshift_dataset_name + uniquestring
        dataset_physical_id = redshift_dataset_name + uniquestring

        quicksight_data_set = cr.AwsCustomResource(
            self,
            'RedshiftDataSet',
            on_create={
                "service": "QuickSight",
                "action": "createDataSet",
                "parameters": {
                    "AwsAccountId": aws_account_id,
                    "DataSetId": dataset_id,
                    "Name": redshift_dataset_name,
                    "ImportMode": "DIRECT_QUERY",
                    "PhysicalTableMap": {
                        "RedshiftPhysicalTable": {
                            "CustomSql": {
                                "DataSourceArn": redshift_datasource_arn,
                                "Name": redshift_dataset_name,
                                "SqlQuery": redshift_custom_sql,
                                "Columns": redshift_columns
                            }
                        }
                    },
                    "LogicalTableMap": {
                        "RedshiftLogicalTable": {
                            "Alias": redshift_dataset_name,
                            "DataTransforms": redshift_data_transformations,
                            "Source": {
                                "PhysicalTableId": "RedshiftPhysicalTable"
                            }
                        }
                    },
                    "Permissions": [{
                        "Principal": quicksight_group_arn,
                        "Actions": dataset_actions
                    }],
                },
                "physical_resource_id":
                    cr.PhysicalResourceId.of(dataset_physical_id)
            },
            on_delete={
                "service": "QuickSight",
                "action": "deleteDataSet",
                "parameters": {
                    "AwsAccountId": aws_account_id,
                    "DataSetId": dataset_id
                },
                "physical_resource_id":
                    cr.PhysicalResourceId.of(dataset_physical_id)
            },
            policy=iam_policy)

        self.__redshift_dataset_arn = quicksight_data_set.get_response_field("Arn")

        core.CfnOutput(
            self, "RedshiftDataSetArn",
            description="Redshift Data Set Arn",
            value=self.__redshift_dataset_arn
        )
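
A hedged sketch of the `iam_policy` a caller might pass in (assumes `aws_iam` is imported as `iam`; the action scope is an assumption, not part of the original):

quicksight_dataset_policy = cr.AwsCustomResourcePolicy.from_statements([
    iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        actions=["quicksight:CreateDataSet", "quicksight:DeleteDataSet"],
        resources=["*"],
    )
])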
Example 6
    def __init__(self, scope: cdk.Construct, id: str,
                 cognito_user_pool: cognito.UserPool, s3_bucket_name: str,
                 create_configuration_lambda_role_arn: str,
                 redis: ec.CfnCacheCluster, domain_name: str,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        config_yaml = yaml.load(open("config.yaml"), Loader=yaml.FullLoader)
        spoke_accounts = config_yaml.get("spoke_accounts", [])

        cognito_user_pool_client = cognito.UserPoolClient(
            self,
            "UserPoolClient",
            user_pool=cognito_user_pool,
            generate_secret=True,
            supported_identity_providers=[
                cognito.UserPoolClientIdentityProvider.COGNITO
            ],
            prevent_user_existence_errors=True,
            o_auth=cognito.OAuthSettings(
                callback_urls=[
                    "https://" + domain_name + "/auth",
                    "https://" + domain_name + "/oauth2/idpresponse",
                ],
                logout_urls=["https://" + domain_name + "/logout"],
                flows=cognito.OAuthFlows(authorization_code_grant=True,
                                         implicit_code_grant=True),
                scopes=[cognito.OAuthScope.OPENID, cognito.OAuthScope.EMAIL],
            ),
            auth_flows=cognito.AuthFlow(user_password=True, user_srp=True),
        )

        describe_cognito_user_pool_client = cr.AwsCustomResource(
            self,
            "UserPoolClientIDResource",
            policy=cr.AwsCustomResourcePolicy.from_sdk_calls(
                resources=cr.AwsCustomResourcePolicy.ANY_RESOURCE),
            on_create=cr.AwsSdkCall(
                service="CognitoIdentityServiceProvider",
                action="describeUserPoolClient",
                parameters={
                    "UserPoolId": cognito_user_pool.user_pool_id,
                    "ClientId": cognito_user_pool_client.user_pool_client_id,
                },
                physical_resource_id=cr.PhysicalResourceId.of(
                    cognito_user_pool_client.user_pool_client_id),
            ),
            install_latest_aws_sdk=True,
            log_retention=logs.RetentionDays.ONE_WEEK,
        )

        cognito_user_pool_client_secret = (
            describe_cognito_user_pool_client.get_response_field(
                "UserPoolClient.ClientSecret"))

        imported_create_configuration_lambda_role = iam.Role.from_role_arn(
            self,
            "ImportedCreateConfigurationFileLambdaRole",
            role_arn=create_configuration_lambda_role_arn,
        )

        jwt_secret = config_yaml["jwt_secret"]

        config_secret_dict = {
            "oidc_secrets": {
                "client_id": cognito_user_pool_client.user_pool_client_id,
                "secret": cognito_user_pool_client_secret,
                "client_scope": ["email", "openid"],
            },
            "jwt_secret": jwt_secret,
        }

        config_secret_yaml = yaml.dump(
            config_secret_dict,
            explicit_start=True,
            default_flow_style=False,
        )

        config_secret = cr.AwsCustomResource(
            self,
            "ConfigSecretResource",
            policy=cr.AwsCustomResourcePolicy.from_sdk_calls(
                resources=cr.AwsCustomResourcePolicy.ANY_RESOURCE),
            on_update=cr.AwsSdkCall(
                service="SecretsManager",
                action="updateSecret",
                parameters={
                    "SecretId": CONFIG_SECRET_NAME,
                    "SecretString": config_secret_yaml,
                },
                physical_resource_id=cr.PhysicalResourceId.from_response(
                    "Name"),
            ),
            on_create=cr.AwsSdkCall(
                service="SecretsManager",
                action="createSecret",
                parameters={
                    "Name": CONFIG_SECRET_NAME,
                    "Description":
                    "Sensitive configuration parameters for ConsoleMe",
                    "SecretString": config_secret_yaml,
                },
                physical_resource_id=cr.PhysicalResourceId.from_response(
                    "Name"),
            ),
            on_delete=cr.AwsSdkCall(
                service="SecretsManager",
                action="deleteSecret",
                parameters={
                    "SecretId": CONFIG_SECRET_NAME,
                    "ForceDeleteWithoutRecovery": True,
                },
            ),
            install_latest_aws_sdk=True,
            log_retention=logs.RetentionDays.ONE_WEEK,
        )

        create_configuration_lambda = lambda_.Function(
            self,
            "CreateConfigurationFileLambda",
            code=lambda_.Code.from_asset("resources/create_config_lambda"),
            handler="index.handler",
            timeout=cdk.Duration.seconds(30),
            layers=[create_dependencies_layer(self, "create_config_lambda")],
            runtime=lambda_.Runtime.PYTHON_3_8,
            role=imported_create_configuration_lambda_role,
            environment={
                "DEPLOYMENT_BUCKET": s3_bucket_name,
                "OIDC_METADATA_URL": "https://cognito-idp." + self.region +
                ".amazonaws.com/" + cognito_user_pool.user_pool_id +
                "/.well-known/openid-configuration",
                "REDIS_HOST": redis.attr_redis_endpoint_address,
                "SES_IDENTITY_ARN": "arn:aws:ses:" + self.region + ":" +
                self.account + ":identity/" + domain_name,
                "SUPPORT_CHAT_URL": "https://discord.gg/nQVpNGGkYu",
                "APPLICATION_ADMIN": "consoleme_admin",
                "ACCOUNT_NUMBER": self.account,
                "ISSUER": domain_name,
                "SPOKE_ACCOUNTS": ",".join(spoke_accounts),
                "CONFIG_SECRET_NAME": CONFIG_SECRET_NAME,
            },
        )

        create_configuration_resource_provider = cr.Provider(
            self,
            "CreateConfigurationFileProvider",
            on_event_handler=create_configuration_lambda,
            log_retention=logs.RetentionDays.ONE_WEEK,
        )

        create_configuration_lambda_resource = cdk.CustomResource(
            self,
            "CreateConfigurationFile",
            service_token=create_configuration_resource_provider.service_token,
            removal_policy=cdk.RemovalPolicy.DESTROY,
            properties={"UUID": str(uuid4())},
        )

        create_configuration_lambda_resource.node.add_dependency(config_secret)
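
`CONFIG_SECRET_NAME` and `create_dependencies_layer` are module-level names not shown above; plausible sketches only (the secret name and layer layout are assumptions):

CONFIG_SECRET_NAME = "consoleme-config"

def create_dependencies_layer(scope, function_name: str) -> lambda_.LayerVersion:
    # Package the function's pip dependencies as a Lambda layer.
    return lambda_.LayerVersion(
        scope,
        f"{function_name}-dependencies",
        code=lambda_.Code.from_asset(f"resources/{function_name}/layer"),
        compatible_runtimes=[lambda_.Runtime.PYTHON_3_8],
    )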
Example 7
    def __init__(self, scope: core.Construct, id: str,
                 iam_policy: cr.AwsCustomResourcePolicy,
                 quicksight_group_arn: str, redshift_dataset_arn: str,
                 redshift_analysis_name: str,
                 redshift_analysis_template_alias: str, analysis_actions: list,
                 **kwargs):

        super().__init__(scope, id, **kwargs)

        aws_account_id = core.Aws.ACCOUNT_ID
        uniquestring = datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S')
        redshift_analysis_id = redshift_analysis_name + uniquestring
        redshift_analysis_physical_id = redshift_analysis_name + uniquestring

        redshift_analysis = cr.AwsCustomResource(
            self,
            'RedshiftAnalysis',
            on_create={
                "service": "QuickSight",
                "action": "createAnalysis",
                "parameters": {
                    "AwsAccountId": aws_account_id,
                    "Name": redshift_analysis_name,
                    "AnalysisId": redshift_analysis_id,
                    "Permissions": [{
                        'Principal': quicksight_group_arn,
                        'Actions': analysis_actions
                    }],
                    "SourceEntity": {
                        "SourceTemplate": {
                            "Arn": redshift_analysis_template_alias,
                            "DataSetReferences": [{
                                "DataSetArn": redshift_dataset_arn,
                                "DataSetPlaceholder": "MainDataset"
                            }]
                        }
                    }
                },
                "physical_resource_id":
                    cr.PhysicalResourceId.of(redshift_analysis_physical_id)
            },
            on_delete={
                "service": "QuickSight",
                "action": "deleteAnalysis",
                "parameters": {
                    "AwsAccountId": aws_account_id,
                    "AnalysisId": redshift_analysis_id,
                    "ForceDeleteWithoutRecovery": True
                },
                "physical_resource_id":
                    cr.PhysicalResourceId.of(redshift_analysis_physical_id)
            },
            policy=iam_policy)
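
A hedged follow-up sketch: the new analysis ARN can be surfaced the same way the dataset example does (the `Arn` field name is assumed from the createAnalysis response shape):

        core.CfnOutput(self,
                       "RedshiftAnalysisArn",
                       description="Redshift Analysis Arn",
                       value=redshift_analysis.get_response_field("Arn"))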
Example 8
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        role = iam.Role(
            scope=self,
            id='AwsCustomResourceRole',
            assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'))
        role.add_to_policy(
            iam.PolicyStatement(actions=['iam:PassRole'], resources=['*']))

        my_custom_resource = cr.AwsCustomResource(
            scope=self,
            id='MyAwsCustomResource',
            role=role,
            policy=cr.AwsCustomResourcePolicy.from_sdk_calls(resources=['*']),
            on_create=cr.AwsSdkCall(
                action='listBuckets',
                service='S3',
                physical_resource_id=cr.PhysicalResourceId.of('BucketsList'),
            ))

        vpc = VPCConstruct(self, id_='test-vpc', num_of_azs=2)
        security_group = SecurityGroup(
            self,
            id='test-security-group',
            vpc=vpc,
            security_group_name='test-security-group')
        security_group.add_ingress_rule(connection=Port.tcp(443),
                                        peer=vpc.lambdas_sg)

        domain = es.Domain(
            scope=self,
            id='Domain',
            version=es.ElasticsearchVersion.V7_9,
            domain_name="es-domain-name",
            enable_version_upgrade=False,
            enforce_https=True,
            fine_grained_access_control=None,
            node_to_node_encryption=True,
            tls_security_policy=es.TLSSecurityPolicy.TLS_1_0,
            logging=es.LoggingOptions(
                app_log_enabled=True,
                slow_index_log_enabled=True,
                slow_search_log_enabled=True,
                app_log_group=LogGroup(
                    scope=self,
                    id="app-log-group",
                    log_group_name='/aws/aes/domains/esdomain/app-log-group',
                    removal_policy=core.RemovalPolicy.DESTROY),
                slow_index_log_group=LogGroup(
                    scope=self,
                    id="slow-index-log-group",
                    log_group_name='/aws/aes/domains/esdomain/slow-index-log-group',
                    removal_policy=core.RemovalPolicy.DESTROY),
                slow_search_log_group=LogGroup(
                    scope=self,
                    id="slow-search-log-group",
                    log_group_name='/aws/aes/domains/esdomain/slow-search-log-group',
                    removal_policy=core.RemovalPolicy.DESTROY)),
            removal_policy=core.RemovalPolicy.DESTROY,
            zone_awareness=es.ZoneAwarenessConfig(availability_zone_count=2,
                                                  enabled=True),
            vpc_options=es.VpcOptions(
                security_groups=[security_group],
                subnets=vpc.audit_vpc.select_subnets(
                    subnet_group_name=PRIVATE_SUBNET_GROUP).subnets))
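
A hedged usage sketch: the first bucket name in the listBuckets response can be exported (the path follows the S3 listBuckets response shape):

        core.CfnOutput(
            self,
            "FirstBucketName",
            value=my_custom_resource.get_response_field("Buckets.0.Name"))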
Example 9
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # create s3 bucket
        s3_Bucket = s3.Bucket(self,
                              "AWS-Cookbook-Recipe-404",
                              removal_policy=RemovalPolicy.DESTROY,
                              auto_delete_objects=True)

        aws_s3_deployment.BucketDeployment(
            self,
            'S3Deployment',
            destination_bucket=s3_Bucket,
            sources=[aws_s3_deployment.Source.asset("./s3_content")],
            retain_on_delete=False)

        isolated_subnets = ec2.SubnetConfiguration(
            name="ISOLATED",
            subnet_type=ec2.SubnetType.PRIVATE_ISOLATED,
            cidr_mask=24)

        # create VPC
        vpc = ec2.Vpc(self,
                      'AWS-Cookbook-VPC',
                      cidr='10.10.0.0/23',
                      subnet_configuration=[isolated_subnets])

        vpc.add_interface_endpoint(
            'VPCSecretsManagerInterfaceEndpoint',
            service=ec2.InterfaceVpcEndpointAwsService(
                'secretsmanager'
            ),  # Find names with - aws ec2 describe-vpc-endpoint-services | jq '.ServiceNames'
            private_dns_enabled=True,
            subnets=ec2.SubnetSelection(
                one_per_az=False, subnet_type=ec2.SubnetType.PRIVATE_ISOLATED),
        )

        vpc.add_gateway_endpoint(
            's3GateWayEndPoint',
            service=ec2.GatewayVpcEndpointAwsService('s3'),
            subnets=[
                ec2.SubnetSelection(
                    subnet_type=ec2.SubnetType.PRIVATE_ISOLATED)
            ],
        )

        subnet_group = rds.SubnetGroup(
            self,
            'rds_subnet_group',
            description='VPC Subnet Group for RDS',
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                one_per_az=False, subnet_type=ec2.SubnetType.PRIVATE_ISOLATED))

        rds_instance = rds.DatabaseInstance(
            self,
            'DBInstance',
            engine=rds.DatabaseInstanceEngine.mysql(
                version=rds.MysqlEngineVersion.VER_8_0_23),
            instance_type=ec2.InstanceType("m5.large"),
            vpc=vpc,
            multi_az=False,
            database_name='AWSCookbookRecipe404',
            instance_identifier='awscookbook404db-orig',
            delete_automated_backups=True,
            deletion_protection=False,
            # iam_authentication=
            removal_policy=RemovalPolicy.DESTROY,
            allocated_storage=8,
            subnet_group=subnet_group)

        # mkdir -p lambda-layers/sqlparse/python
        # cd lambda-layers/sqlparse/python
        # pip install sqlparse --target="."
        # cd ../../../

        # create Lambda Layer
        sqlparse = aws_lambda.LayerVersion(
            self,
            "sqlparse",
            code=aws_lambda.AssetCode('lambda-layers/sqlparse'),
            compatible_runtimes=[aws_lambda.Runtime.PYTHON_3_8],
            description="sqlparse",
            license="https://github.com/andialbrecht/sqlparse/blob/master/LICENSE")

        pymysql = aws_lambda.LayerVersion(
            self,
            "pymysql",
            code=aws_lambda.AssetCode('lambda-layers/pymysql'),
            compatible_runtimes=[aws_lambda.Runtime.PYTHON_3_8],
            description="pymysql",
            license="MIT")

        smartopen = aws_lambda.LayerVersion(
            self,
            "smartopen",
            code=aws_lambda.AssetCode('lambda-layers/smart_open'),
            compatible_runtimes=[aws_lambda.Runtime.PYTHON_3_8],
            description="smartopen",
            license="MIT")

        lambda_function = aws_lambda.Function(
            self,
            'LambdaRDS',
            code=aws_lambda.AssetCode("./mysql-lambda/"),
            handler="lambda_function.lambda_handler",
            environment={
                "DB_SECRET_ARN": rds_instance.secret.secret_arn,
                "S3_BUCKET": s3_Bucket.bucket_name
            },
            layers=[sqlparse, pymysql, smartopen],
            memory_size=1024,
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            timeout=Duration.seconds(600),
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PRIVATE_ISOLATED))

        rds_instance.secret.grant_read(lambda_function)

        rds_instance.connections.allow_from(lambda_function.connections,
                                            ec2.Port.tcp(3306), "Ingress")

        s3_Bucket.grant_read(lambda_function)

        create_params = {
            "FunctionName": lambda_function.function_arn,
        }

        on_create = custom_resources.AwsSdkCall(
            action='invoke',
            service='Lambda',
            parameters=create_params,
            physical_resource_id=custom_resources.PhysicalResourceId.of(
                'LambdaRDS'))

        policy_statement = iam.PolicyStatement(
            actions=["lambda:InvokeFunction"],
            effect=iam.Effect.ALLOW,
            resources=[lambda_function.function_arn],
        )

        policy = custom_resources.AwsCustomResourcePolicy.from_statements(
            statements=[policy_statement])

        custom_resources.AwsCustomResource(
            self,
            'CustomResource',
            policy=policy,
            on_create=on_create,
            log_retention=logs.RetentionDays.TWO_WEEKS)

        # outputs

        CfnOutput(self, 'RdsSubnetGroup', value=subnet_group.subnet_group_name)

        CfnOutput(self,
                  'RdsDatabaseId',
                  value=rds_instance.instance_identifier)
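
A hedged variant (not in the original): an `on_update` call with a time-based physical resource id, passed alongside `on_create` above, would re-run the SQL bootstrap on every deployment; assumes `import time` at module level:

        on_update = custom_resources.AwsSdkCall(
            action='invoke',
            service='Lambda',
            parameters=create_params,
            physical_resource_id=custom_resources.PhysicalResourceId.of(
                f'LambdaRDS-{int(time.time())}'))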
Example 10
    def __init__(self, scope: core.Construct, construct_id: str,
                 subnets: List[str], vpc: ec2.IVpc, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)
        self.env_name = "MwaaForEmrOnEks"
        self.prefix_list_id = self.node.try_get_context("prefix")

        # Create S3 bucket for MWAA
        bucket = s3.Bucket(self,
                           "MwaaBucket",
                           encryption=s3.BucketEncryption.S3_MANAGED,
                           block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
                           versioned=True)
        core.CfnOutput(self, "BucketName", value=bucket.bucket_name)

        # Create MWAA role
        role = iam.Role(
            self,
            "MwaaRole",
            assumed_by=iam.ServicePrincipal("airflow-env.amazonaws.com"))
        role.add_to_policy(
            iam.PolicyStatement(resources=[
                f"arn:aws:airflow:{self.region}:{self.account}:environment/{self.env_name}"
            ],
                                actions=["airflow:PublishMetrics"],
                                effect=iam.Effect.ALLOW))
        role.add_to_policy(
            iam.PolicyStatement(resources=[
                f"arn:aws:s3:::{bucket.bucket_name}",
                f"arn:aws:s3:::{bucket.bucket_name}/*"
            ],
                                actions=["s3:ListAllMyBuckets"],
                                effect=iam.Effect.DENY))
        role.add_to_policy(
            iam.PolicyStatement(
                resources=[
                    f"arn:aws:s3:::{bucket.bucket_name}",
                    f"arn:aws:s3:::{bucket.bucket_name}/*"
                ],
                actions=["s3:GetObject*", "s3:GetBucket*", "s3:List*"],
                effect=iam.Effect.ALLOW))
        role.add_to_policy(
            iam.PolicyStatement(resources=[
                f"arn:aws:logs:{self.region}:{self.account}:log-group:airflow-{self.env_name}-*"
            ],
                                actions=[
                                    "logs:CreateLogStream",
                                    "logs:CreateLogGroup", "logs:PutLogEvents",
                                    "logs:GetLogEvents", "logs:GetLogRecord",
                                    "logs:GetLogGroupFields",
                                    "logs:GetQueryResults",
                                    "logs:DescribeLogGroups"
                                ],
                                effect=iam.Effect.ALLOW))
        role.add_to_policy(
            iam.PolicyStatement(resources=["*"],
                                actions=["cloudwatch:PutMetricData"],
                                effect=iam.Effect.ALLOW))
        role.add_to_policy(
            iam.PolicyStatement(resources=["*"],
                                actions=[
                                    "emr-containers:StartJobRun",
                                    "emr-containers:ListJobRuns",
                                    "emr-containers:DescribeJobRun",
                                    "emr-containers:CancelJobRun"
                                ],
                                effect=iam.Effect.ALLOW))
        role.add_to_policy(
            iam.PolicyStatement(
                resources=[f"arn:aws:sqs:{self.region}:*:airflow-celery-*"],
                actions=[
                    "sqs:ChangeMessageVisibility", "sqs:DeleteMessage",
                    "sqs:GetQueueAttributes", "sqs:GetQueueUrl",
                    "sqs:ReceiveMessage", "sqs:SendMessage"
                ],
                effect=iam.Effect.ALLOW))
        string_like = core.CfnJson(
            self,
            "ConditionJson",
            value={f"kms:ViaService": f"sqs.{self.region}.amazonaws.com"})
        role.add_to_policy(
            iam.PolicyStatement(
                not_resources=[f"arn:aws:kms:*:{self.account}:key/*"],
                actions=[
                    "kms:Decrypt", "kms:DescribeKey", "kms:GenerateDataKey*",
                    "kms:Encrypt"
                ],
                effect=iam.Effect.ALLOW,
                conditions={"StringLike": string_like}))

        # Upload MWAA pre-reqs
        s3deploy.BucketDeployment(
            self,
            "DeployPlugin",
            sources=[
                s3deploy.Source.asset(
                    "./emr_eks_cdk/mwaa_plugins",
                    exclude=['**', '!emr_containers_airflow_plugin.zip'])
            ],
            destination_bucket=bucket,
            destination_key_prefix="plug-ins")
        s3req = s3deploy.BucketDeployment(
            self,
            "DeployReq",
            sources=[
                s3deploy.Source.asset("./emr_eks_cdk/mwaa_plugins",
                                      exclude=['**', '!requirements.txt'])
            ],
            destination_bucket=bucket,
            destination_key_prefix="Requirements")
        s3deploy.BucketDeployment(self,
                                  "DeployDag",
                                  sources=[
                                      s3deploy.Source.asset(
                                          "./emr_eks_cdk/mwaa_plugins",
                                          exclude=['**', '!emr_eks.py'])
                                  ],
                                  destination_bucket=bucket,
                                  destination_key_prefix="DAG")

        # Get object versions
        req_obj_version = custom.AwsCustomResource(
            self,
            "GetReqV",
            on_update={
                "service": "S3",
                "action": "headObject",
                "parameters": {
                    "Bucket": bucket.bucket_name,
                    "Key": "Requirements/requirements.txt"
                },
                "physical_resource_id":
                    custom.PhysicalResourceId.from_response("VersionId")
            },
            policy=custom.AwsCustomResourcePolicy.from_sdk_calls(
                resources=custom.AwsCustomResourcePolicy.ANY_RESOURCE),
            role=iam.Role(
                scope=self,
                id=f'{construct_id}-LambdaRole',
                assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
                managed_policies=[
                    iam.ManagedPolicy.from_aws_managed_policy_name(
                        "service-role/AWSLambdaBasicExecutionRole"),
                    iam.ManagedPolicy.from_aws_managed_policy_name(
                        "AmazonS3FullAccess")
                ]))
        core.CfnOutput(self,
                       "ReqObjVersion",
                       value=req_obj_version.get_response_field("VersionId"))
        plugin_obj_version = custom.AwsCustomResource(
            self,
            "GetPluginV",
            on_update={
                "service": "S3",
                "action": "headObject",
                "parameters": {
                    "Bucket": bucket.bucket_name,
                    "Key": "plug-ins/emr_containers_airflow_plugin.zip"
                },
                "physical_resource_id":
                    custom.PhysicalResourceId.from_response("VersionId")
            },
            policy=custom.AwsCustomResourcePolicy.from_sdk_calls(
                resources=custom.AwsCustomResourcePolicy.ANY_RESOURCE),
            role=iam.Role(
                scope=self,
                id=f'{construct_id}-LambdaRole-2',
                assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
                managed_policies=[
                    iam.ManagedPolicy.from_aws_managed_policy_name(
                        "service-role/AWSLambdaBasicExecutionRole"),
                    iam.ManagedPolicy.from_aws_managed_policy_name(
                        "AmazonS3FullAccess")
                ]))
        core.CfnOutput(
            self,
            "PluginObjVersion",
            value=plugin_obj_version.get_response_field("VersionId"))

        # Create security group
        mwaa_sg = ec2.SecurityGroup(self,
                                    "SecurityGroup",
                                    vpc=vpc,
                                    description="Allow inbound access to MWAA",
                                    allow_all_outbound=True)
        mwaa_sg.add_ingress_rule(ec2.Peer.prefix_list(self.prefix_list_id),
                                 ec2.Port.all_tcp(),
                                 "allow inbound access from the prefix list")
        mwaa_sg.add_ingress_rule(mwaa_sg, ec2.Port.all_traffic(),
                                 "allow inbound access from the SG")

        mwaa_env = mwaa.CfnEnvironment(
            self,
            "MWAAEnv",
            name=self.env_name,
            dag_s3_path="DAG",
            environment_class="mw1.small",
            execution_role_arn=role.role_arn,
            logging_configuration=mwaa.CfnEnvironment.
            LoggingConfigurationProperty(
                dag_processing_logs=mwaa.CfnEnvironment.
                ModuleLoggingConfigurationProperty(enabled=True,
                                                   log_level='INFO'),
                scheduler_logs=mwaa.CfnEnvironment.
                ModuleLoggingConfigurationProperty(enabled=True,
                                                   log_level='INFO'),
                task_logs=mwaa.CfnEnvironment.
                ModuleLoggingConfigurationProperty(enabled=True,
                                                   log_level='INFO'),
                webserver_logs=mwaa.CfnEnvironment.
                ModuleLoggingConfigurationProperty(enabled=True,
                                                   log_level='INFO'),
                worker_logs=mwaa.CfnEnvironment.
                ModuleLoggingConfigurationProperty(enabled=True,
                                                   log_level='INFO')),
            network_configuration=mwaa.CfnEnvironment.
            NetworkConfigurationProperty(
                security_group_ids=[mwaa_sg.security_group_id],
                subnet_ids=subnets),
            plugins_s3_path="plug-ins/emr_containers_airflow_plugin.zip",
            plugins_s3_object_version=plugin_obj_version.get_response_field(
                "VersionId"),
            requirements_s3_path="Requirements/requirements.txt",
            requirements_s3_object_version=req_obj_version.get_response_field(
                "VersionId"),
            source_bucket_arn=bucket.bucket_arn,
            webserver_access_mode='PUBLIC_ONLY')
        core.CfnOutput(self, "MWAA_NAME", value=self.env_name)
Example 11
    def __init__(self, scope: core.Construct, id: str, source_bucket_name: str,
                 glue_database_name: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # get the source bucket - this object is an IBucketProxy interface, not a Bucket construct.
        # It cannot be used to add an event directly. Instead, use a custom resource to add an event trigger later
        source_bucket = s3.Bucket.from_bucket_name(
            self, "MySourceBucket", bucket_name=source_bucket_name)

        # create the new destination bucket - this bucket holds the csv file that contains the FITS header information
        # the name of the bucket will be <stack-id>-fitsstorebucketXXXXXXXX-YYYYYYYYYYYYY
        # e.g. my-fits-datalake-fitsstorebucket1234567f-098765432d
        target_bucket = s3.Bucket(self, "FITSSTORE_BUCKET")

        # Add the astropy and numpy layers for the lambda function that is used as the event trigger on the source_bucket
        layer_astropy = lambda_.LayerVersion(
            self,
            'AstroFitsioLayer',
            code=lambda_.Code.from_asset("resources_layer/astropy.zip"),
            compatible_runtimes=[lambda_.Runtime.PYTHON_3_7])
        # use an AWS provided layer for numpy
        layer_numpy = lambda_.LayerVersion.from_layer_version_arn(
            self, "NumpyLayer",
            "arn:aws:lambda:us-east-1:668099181075:layer:AWSLambda-Python37-SciPy1x:22"
        )

        # create the FITS header extractor lambda function
        # pass the FITSSTORE_BUCKET to the lambda function as an environment variable
        handler = lambda_.Function(
            self,
            "FITSHeaderExtractorHandler",
            runtime=lambda_.Runtime.PYTHON_3_7,
            code=lambda_.Code.asset("resources"),
            handler="fits_header_extractor.fits_header_extractor_handler",
            environment=dict(FITSSTORE_BUCKET=target_bucket.bucket_name),
            layers=[layer_astropy, layer_numpy])

        # grant read access to handler on source bucket
        source_bucket.grant_read(handler)

        # Give the lambda a resource-based policy;
        # both source_arn and source_account are needed for security reasons
        handler.add_permission(
            's3-trigger-lambda-s3-invoke-function',
            principal=iam_.ServicePrincipal('s3.amazonaws.com'),
            action='lambda:InvokeFunction',
            source_arn=source_bucket.bucket_arn,
            source_account=self.account)

        # grant access to the handler
        # - this is a lot easier than adding policies, but not all constructs support this
        target_bucket.grant_read_write(handler)

        # map the put event to handler - this doesn't work as source_bucket is not really a Bucket object (IBucketProxy)
        # You can use this approach if the bucket is created as a new Bucket object
        #notification = s3_notifications.LambdaDestination(handler)
        #source_bucket.add_object_created_notification(self, notification )

        # use a custom resource to add an event trigger on the source bucket -
        # the custom resource makes an SDK call to create the event notification on the bucket
        # Action reference https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html
        # Events reference https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
        custom_s3_resource = custom_resources_.AwsCustomResource(
            self,
            's3-putobject-custom-notification-resource',
            policy=custom_resources_.AwsCustomResourcePolicy.from_statements([
                iam_.PolicyStatement(effect=iam_.Effect.ALLOW,
                                     resources=['*'],
                                     actions=['s3:PutBucketNotification'])
            ]),
            on_create=custom_resources_.AwsSdkCall(
                service="S3",
                action="putBucketNotificationConfiguration",
                parameters={
                    "Bucket": source_bucket.bucket_name,
                    "NotificationConfiguration": {
                        "LambdaFunctionConfigurations": [{
                            "Events":
                            ['s3:ObjectCreated:*', 's3:ObjectRemoved:*'],
                            "LambdaFunctionArn":
                            handler.function_arn,
                            "Filter": {
                                "Key": {
                                    "FilterRules": [{
                                        'Name': 'suffix',
                                        'Value': 'fits'
                                    }]
                                }
                            }
                        }]
                    }
                },
                physical_resource_id=custom_resources_.PhysicalResourceId.of(
                    f's3-notification-resource-{str(uuid.uuid1())}'),
                region=self.region))

        # Make sure the lambda function is created first
        custom_s3_resource.node.add_dependency(
            handler.permissions_node.find_child(
                's3-trigger-lambda-s3-invoke-function'))

        # create a glue crawler to build the data catalog
        # Step 1 . create a role for AWS Glue
        glue_role = iam_.Role(
            self,
            "glue_role",
            assumed_by=iam_.ServicePrincipal('glue.amazonaws.com'),
            managed_policies=[
                iam_.ManagedPolicy.from_managed_policy_arn(
                    self,
                    'MyFitsCrawlerGlueRole',
                    managed_policy_arn=
                    'arn:aws:iam::aws:policy/service-role/AWSGlueServiceRole')
            ])
        # glue role needs "*" read/write - otherwise crawler will not be able to create tables (and no error messages in crawler logs)
        glue_role.add_to_policy(
            iam_.PolicyStatement(actions=[
                's3:GetObject', 's3:PutObject', 'lakeformation:GetDataAccess'
            ],
                                 effect=iam_.Effect.ALLOW,
                                 resources=['*']))

        # Step 2. create a database in data catalog
        db = glue_.Database(self,
                            "MyFitsDatabase",
                            database_name=glue_database_name)

        # Step 3. create a crawler named "fitsdatalakecrawler-<hex>", and schedule it to run every 15 mins
        # You can change the frequency based on your needs
        # cron schedule format cron(Minutes Hours Day-of-month Month Day-of-week Year)
        glue_.CfnCrawler(
            self,
            "fits-datalake-crawler",
            database_name=glue_database_name,
            role=glue_role.role_arn,
            schedule={"scheduleExpression": "cron(0/15 * * * ? *)"},
            targets={"s3Targets": [{
                "path": target_bucket.bucket_name
            }]},
        )

        # When your AWS Lake Formation data catalog settings are not set to
        # "Use only IAM access control for new databases" or
        # "Use only IAM access control for new tables in new databases",
        # you need to grant additional permissions to the data catalog database
        # in order for the crawler to run; we add those Lake Formation permissions below

        location_resource = lakeformation_.CfnResource(
            self,
            "MyFitsDatalakeLocationResource",
            resource_arn=target_bucket.bucket_arn,
            use_service_linked_role=True)
        lakeformation_.CfnPermissions(
            self,
            "MyFitsDatalakeDatabasePermission",
            data_lake_principal=lakeformation_.CfnPermissions.
            DataLakePrincipalProperty(
                data_lake_principal_identifier=glue_role.role_arn),
            resource=lakeformation_.CfnPermissions.ResourceProperty(
                database_resource=lakeformation_.CfnPermissions.
                DatabaseResourceProperty(name=db.database_name)),
            permissions=["ALTER", "DROP", "CREATE_TABLE"],
        )
        location_permission = lakeformation_.CfnPermissions(
            self,
            "MyFitsDatalakeLocationPermission",
            data_lake_principal=lakeformation_.CfnPermissions.
            DataLakePrincipalProperty(
                data_lake_principal_identifier=glue_role.role_arn),
            resource=lakeformation_.CfnPermissions.ResourceProperty(
                data_location_resource=lakeformation_.CfnPermissions.
                DataLocationResourceProperty(
                    s3_resource=target_bucket.bucket_arn)),
            permissions=["DATA_LOCATION_ACCESS"],
        )
        # make sure the location resource is created first
        location_permission.node.add_dependency(location_resource)
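
A hedged follow-up sketch: exporting the generated store bucket name makes it easier to locate (the output id is an assumption):

        core.CfnOutput(self,
                       "FitsStoreBucketName",
                       value=target_bucket.bucket_name)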
Example 12
def custom_fsx_task(self, host_port: int, family: str, file_system_id: str,
                    mad_secret_arn: str, mad_domain_name: str,
                    task_role: iam.Role, execution_role: iam.Role):
    on_create_aws_sdk_call = custom_resources.AwsSdkCall(
        physical_resource_id=custom_resources.PhysicalResourceId.from_response(
            'taskDefinition.taskDefinitionArn'),
        service="ECS",
        action="registerTaskDefinition",  # https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/ECS.html (watch out for camel case!)
        parameters={
            "family": family,
            "taskRoleArn": task_role.role_arn,
            "executionRoleArn": execution_role.role_arn,
            "containerDefinitions": [{
                "name": "IISContainer",
                "image": "microsoft/iis",
                "cpu": 512,
                "memory": 1024,
                "links": [],
                "portMappings": [{
                    "containerPort": 80,
                    "hostPort": host_port,
                    "protocol": "tcp"
                }],
                "essential": True,
                "entryPoint": ["powershell", "-Command"],
                "mountPoints": [{
                    "sourceVolume": file_system_id,
                    "containerPath": 'C:\\fsx-windows-dir',
                    "readOnly": False
                }],
                "command": [
                    '$IndexFilePath = "C:\\fsx-windows-dir\\index.html"; if ((Test-Path -Path $IndexFilePath) -ne $true){New-Item -Path $IndexFilePath -ItemType file -Value "<html> <head> <title>Amazon ECS Sample App</title> <style>body {margin-top: 40px; background-color: #ff3;} </style> </head><body> <div style=color:black;text-align:center> <h1>Amazon ECS Sample App</h1> <h2>Congratulations!</h2> <p>Your application is now running on a container in Amazon ECS.</p> <table style=margin-left:auto;margin-right:auto;><tr><th>TimeStamp</th><th>Task ID</th></tr>" -Force;}; $datetime = Get-Date -Format "yyyy-MM-dd HH:mm:ss"; $TaskId = (Invoke-RestMethod -Method GET -Uri $env:ECS_CONTAINER_METADATA_URI_V4/task).TaskARN.split("/")[2]; Add-Content -Path $IndexFilePath -Value "<tr><th>$datetime</th><th>$TaskId</th></tr>"; Copy-Item -Path $IndexFilePath -Destination C:\\inetpub\\wwwroot\\index.html -Force; C:\\ServiceMonitor.exe w3svc;'
                ]
            }],
            "volumes": [
                {
                    'name': file_system_id,
                    'fsxWindowsFileServerVolumeConfiguration': {
                        'fileSystemId': file_system_id,
                        'rootDirectory': 'share',
                        'authorizationConfig': {
                            'credentialsParameter': mad_secret_arn,
                            'domain': mad_domain_name
                        }
                    }
                },
            ],
            "requiresCompatibilities": ['EC2']
        })

    on_delete_aws_sdk_call = custom_resources.AwsSdkCall(
        physical_resource_id=custom_resources.PhysicalResourceId.from_response(
            'taskDefinition.taskDefinitionArn'),
        service="ECS",
        action="deregisterTaskDefinition",  # https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/ECS.html (watch out for camel case!)
        parameters={
            # https://docs.aws.amazon.com/cdk/api/latest/docs/custom-resources-readme.html#physical-resource-id-parameter
            "taskDefinition": custom_resources.PhysicalResourceIdReference()
        })

    custom_task = custom_resources.AwsCustomResource(
        self,
        "FSXTaskResource",
        policy=custom_resources.AwsCustomResourcePolicy.from_statements(
            statements=[
                iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                    actions=["ecs:*"],
                                    resources=["*"]),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=["iam:PassRole"],
                    resources=[task_role.role_arn, execution_role.role_arn])
            ]),
        on_create=on_create_aws_sdk_call,
        on_update=on_create_aws_sdk_call,
        on_delete=on_delete_aws_sdk_call)

    task_definition_arn = custom_task.get_response_field(
        'taskDefinition.taskDefinitionArn')
    return task_definition_arn
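
A hedged usage sketch (assumes `aws_ecs` is imported as `ecs` and the caller holds the returned ARN): the ARN can be imported back into the CDK model for use with an ECS service:

imported_task_def = ecs.Ec2TaskDefinition.from_ec2_task_definition_arn(
    self, "ImportedFsxTaskDef", task_definition_arn)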
Example 13
    def __init__(self, scope: cdk.Construct, id: str, domain_name: str,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # User pool and user pool OAuth client

        cognito_user_pool = cognito.UserPool(
            self, "UserPool", removal_policy=cdk.RemovalPolicy.DESTROY)

        cognito.UserPoolDomain(
            self,
            "UserPoolDomain",
            cognito_domain=cognito.CognitoDomainOptions(
                domain_prefix=APPLICATION_PREFIX + "-" + APPLICATION_SUFFIX),
            user_pool=cognito_user_pool,
        )

        cognito_admin_user = cr.AwsCustomResource(
            self,
            "UserPoolAdminUserResource",
            policy=cr.AwsCustomResourcePolicy.from_sdk_calls(
                resources=cr.AwsCustomResourcePolicy.ANY_RESOURCE),
            on_create=cr.AwsSdkCall(
                service="CognitoIdentityServiceProvider",
                action="adminCreateUser",
                parameters={
                    "UserPoolId": cognito_user_pool.user_pool_id,
                    "Username": "******",
                    "UserAttributes": [{
                        "Name": "email",
                        "Value": "consoleme_admin@" + domain_name
                    }],
                    "TemporaryPassword": ADMIN_TEMP_PASSWORD,
                },
                physical_resource_id=cr.PhysicalResourceId.of(
                    cognito_user_pool.user_pool_id),
            ),
        )

        cognito_admin_group = cr.AwsCustomResource(
            self,
            "UserPoolAdminGroupResource",
            policy=cr.AwsCustomResourcePolicy.from_sdk_calls(
                resources=cr.AwsCustomResourcePolicy.ANY_RESOURCE),
            on_create=cr.AwsSdkCall(
                service="CognitoIdentityServiceProvider",
                action="createGroup",
                parameters={
                    "UserPoolId": cognito_user_pool.user_pool_id,
                    "GroupName": "consoleme_admins",
                },
                physical_resource_id=cr.PhysicalResourceId.of(
                    id="UserPoolAdminGroupResource"),
            ),
        )

        cr.AwsCustomResource(
            self,
            "UserPoolUserGroupResource",
            policy=cr.AwsCustomResourcePolicy.from_sdk_calls(
                resources=cr.AwsCustomResourcePolicy.ANY_RESOURCE),
            on_create=cr.AwsSdkCall(
                service="CognitoIdentityServiceProvider",
                action="createGroup",
                parameters={
                    "UserPoolId": cognito_user_pool.user_pool_id,
                    "GroupName": "consoleme_users",
                },
                physical_resource_id=cr.PhysicalResourceId.of(
                    id="UserPoolUserGroupResource"),
            ),
        )

        cognito_assign_admin_group = cr.AwsCustomResource(
            self,
            "UserPoolAssignAdminGroupResource",
            policy=cr.AwsCustomResourcePolicy.from_sdk_calls(
                resources=cr.AwsCustomResourcePolicy.ANY_RESOURCE),
            on_create=cr.AwsSdkCall(
                service="CognitoIdentityServiceProvider",
                action="adminAddUserToGroup",
                parameters={
                    "UserPoolId": cognito_user_pool.user_pool_id,
                    "GroupName": "consoleme_admins",
                    "Username": "******",
                },
                physical_resource_id=cr.PhysicalResourceId.of(
                    id="UserPoolAssignAdminGroupResource"),
            ),
        )

        cognito_assign_admin_group.node.add_dependency(cognito_admin_user)
        cognito_assign_admin_group.node.add_dependency(cognito_admin_group)

        self.cognito_user_pool = cognito_user_pool
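
`APPLICATION_PREFIX`, `APPLICATION_SUFFIX` and `ADMIN_TEMP_PASSWORD` are module-level constants not shown above; plausible sketches (all values are assumptions):

APPLICATION_PREFIX = "consoleme"
APPLICATION_SUFFIX = "demo"
ADMIN_TEMP_PASSWORD = "ChangeMe1!"  # temporary; Cognito requires a reset at first sign-in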
Example 14
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # TODO should this stuff be passed as inputs to the stack ?
        source_code_directory = "/opt/python"
        # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/Cluster.html#aws_cdk.aws_ecs.Cluster.add_capacity
        asg_parameters = {
            "instance_type": ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2,
                                                 ec2.InstanceSize.MICRO),
            "machine_image": ecs.EcsOptimizedImage.amazon_linux2(),
            "desired_capacity": 0,
            "max_capacity": 5,
            "min_capacity": 0,
        }
        # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/Ec2TaskDefinition.html#aws_cdk.aws_ecs.Ec2TaskDefinition.add_container
        container_settings = {
            "memory_limit_mib": 300,
            "logging": ecs.AwsLogDriver(stream_prefix="ecslogs"),
        }
        input_bucket_name = "cdkdemoinput"
        output_bucket_name = "cdkdemooutput"

        # Create a Docker image from a given directory; it will be published to Amazon ECR later
        # TODO can this be cleaned up on destroy as well?
        container_image = ecs.ContainerImage.from_asset(
            directory=source_code_directory)

        # Create an Amazon ECS cluster
        cluster = ecs.Cluster(self, "ecscluster")
        cluster.apply_removal_policy(cdk.RemovalPolicy.DESTROY)

        # Create an auto scaling group for the ECS cluster
        asg = cluster.add_capacity("ecsautoscalinggroup", **asg_parameters)
        # TODO check if needed
        asg.apply_removal_policy(cdk.RemovalPolicy.DESTROY)

        # Create a capacity provider for the ECS cluster based on the auto scaling group
        capacity_provider = ecs.CfnCapacityProvider(
            self,
            "ecscapacityprovider",
            # Name can't start with ecs...
            name="capacityproviderecs",
            auto_scaling_group_provider=ecs.CfnCapacityProvider.
            AutoScalingGroupProviderProperty(
                # the ECS API accepts the Auto Scaling group name here as well as the ARN
                auto_scaling_group_arn=asg.auto_scaling_group_name,
                managed_scaling=ecs.CfnCapacityProvider.ManagedScalingProperty(
                    status="ENABLED"),
                # TODO investigate this better
                managed_termination_protection="DISABLED",
            ),
        )
        capacity_provider.apply_removal_policy(cdk.RemovalPolicy.DESTROY)

        # Currently the CDK checks if the string is FARGATE or FARGATE_SPOT and errors out
        # cluster.add_capacity_provider(capacity_provider.name)
        lame_hack = cr.AwsCustomResource(
            self,
            "lamehack",
            on_create={
                "service":
                "ECS",
                "action":
                "putClusterCapacityProviders",
                "parameters": {
                    "cluster": cluster.cluster_arn,
                    "capacityProviders": [capacity_provider.name],
                    "defaultCapacityProviderStrategy": [],
                },
                "physical_resource_id":
                cr.PhysicalResourceId.of(str(int(time.time()))),
            },
            on_delete={
                "service": "ECS",
                "action": "putClusterCapacityProviders",
                "parameters": {
                    "cluster": cluster.cluster_arn,
                    "capacityProviders": [],
                    "defaultCapacityProviderStrategy": [],
                },
            },
            # TODO lower these permissions
            policy=cr.AwsCustomResourcePolicy.from_sdk_calls(
                resources=cr.AwsCustomResourcePolicy.ANY_RESOURCE),
        )
        lame_hack.node.add_dependency(capacity_provider)
        lame_hack.node.add_dependency(cluster)
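        # One way to address the permissions TODO above: a policy scoped to
        # this cluster instead of ANY_RESOURCE. A sketch only; the variable is
        # unused as written and would replace the from_sdk_calls policy.
        scoped_policy = cr.AwsCustomResourcePolicy.from_statements([
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                actions=["ecs:PutClusterCapacityProviders"],
                                resources=[cluster.cluster_arn])
        ])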

        # Create an ECS task definition with our Docker image
        task_definition = ecs.Ec2TaskDefinition(self, "ecstaskdefinition")
        container_definition = task_definition.add_container(
            "ecscontainer", image=container_image, **container_settings)
        # TODO lower these permissions
        task_definition.task_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonS3FullAccess"))
        task_definition.apply_removal_policy(cdk.RemovalPolicy.DESTROY)

        # Create the Amazon S3 input and output buckets
        input_bucket = s3.Bucket(
            self,
            "bucketinput",
            bucket_name=input_bucket_name,
            versioned=False,
            removal_policy=cdk.RemovalPolicy.DESTROY,
            auto_delete_objects=True,
        )

        output_bucket = s3.Bucket(
            self,
            "bucketoutput",
            bucket_name=output_bucket_name,
            versioned=False,
            removal_policy=cdk.RemovalPolicy.DESTROY,
            auto_delete_objects=True,
        )

        # Create the AWS Lambda function that transforms S3 event information into container inputs
        function = lambda_.Function(
            self,
            "inputlambda",
            code=lambda_.Code.from_inline(lambda_function_code),
            handler="index.lambda_handler",
            runtime=lambda_.Runtime.PYTHON_3_8,
            environment={
                "CAPACITY_PROVIDER_NAME": capacity_provider.name,
                "CLUSTER_NAME": cluster.cluster_arn,
                "CONTAINER_NAME": container_definition.container_name,
                "REGION_NAME": self.region,
                # TODO flaky, why can't we pass the ARN directly?
                "OUTPUT_BUCKET_NAME": output_bucket.bucket_name,
                "TASK_DEFINITION": task_definition.task_definition_arn,
            },
        )
        # Add an S3 object creation trigger for the function
        function.add_event_source(
            lambda_event_sources.S3EventSource(
                input_bucket, events=[s3.EventType.OBJECT_CREATED]))
        # TODO reduce these permissions
        function.role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonECS_FullAccess"))
        function.apply_removal_policy(cdk.RemovalPolicy.DESTROY)
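
The inline lambda_function_code referenced above is defined elsewhere in the source. A hypothetical sketch of such a handler, assuming it launches one ECS task per uploaded object using the environment variables configured on the function (the container environment variable names are assumptions):

    import os
    import boto3

    def lambda_handler(event, context):
        ecs_client = boto3.client("ecs", region_name=os.environ["REGION_NAME"])
        for record in event["Records"]:
            ecs_client.run_task(
                cluster=os.environ["CLUSTER_NAME"],  # the ARN is accepted here
                taskDefinition=os.environ["TASK_DEFINITION"],
                capacityProviderStrategy=[{
                    "capacityProvider": os.environ["CAPACITY_PROVIDER_NAME"],
                }],
                overrides={
                    "containerOverrides": [{
                        "name": os.environ["CONTAINER_NAME"],
                        "environment": [
                            {"name": "INPUT_BUCKET",
                             "value": record["s3"]["bucket"]["name"]},
                            {"name": "INPUT_KEY",
                             "value": record["s3"]["object"]["key"]},
                            {"name": "OUTPUT_BUCKET",
                             "value": os.environ["OUTPUT_BUCKET_NAME"]},
                        ],
                    }],
                },
            )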
Example n. 15
    def __init__(self, scope: core.Construct, construct_id: str, executionRoleArn: str, virtualClusterId: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # policy to let the Lambda-backed custom resources manage security groups
        custom_policy_document = iam.PolicyDocument(statements=[
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=["ec2:CreateSecurityGroup",
                        "ec2:RevokeSecurityGroupEgress",
                        "ec2:CreateSecurityGroup",
                        "ec2:DeleteSecurityGroup",
                        "ec2:AuthorizeSecurityGroupEgress",
                        "ec2:AuthorizeSecurityGroupIngress",
                        "ec2:RevokeSecurityGroupIngress",
                        "ec2:DeleteSecurityGroup"
                        ],
                resources=["*"]
            )
        ])
        managed_policy = iam.ManagedPolicy(self, "EMR_on_EKS_security_group",
            document=custom_policy_document
        )

        self.role = iam.Role(
                scope=self,
                id=f'{construct_id}-LambdaRole',
                assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
                managed_policies=[
                    iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole"),
                    managed_policy
                ]
            )

        # self-signed certificate for the managed endpoint
        crt, pkey = self.cert_gen(serialNumber=random.randint(1000,10000))
        mycert = custom.AwsCustomResource(self, "CreateCert",
            on_update={
                "service": "ACM",
                "action": "importCertificate",
                "parameters": {
                    "Certificate": crt.decode("utf-8"),
                    "PrivateKey": pkey.decode("utf-8")
                },
                "physical_resource_id": custom.PhysicalResourceId.from_response("CertificateArn")
            },
            policy=custom.AwsCustomResourcePolicy.from_sdk_calls(resources=custom.AwsCustomResourcePolicy.ANY_RESOURCE),
            role=self.role,
            function_name="CreateCertFn"
        )

        # Set up managed endpoint for Studio
        endpoint = custom.AwsCustomResource(self, "CreateEndpoint",
            on_create={
                "service": "EMRcontainers",
                "action": "createManagedEndpoint",
                "parameters": {
                    "certificateArn": mycert.get_response_field("CertificateArn"),
                    "executionRoleArn": executionRoleArn,
                    "name": "emr-endpoint-eks-spark",
                    "releaseLabel": "emr-6.2.0-latest",
                    "type": "JUPYTER_ENTERPRISE_GATEWAY",
                    "virtualClusterId": virtualClusterId,
                },
                "physical_resource_id": custom.PhysicalResourceId.from_response("arn")},
            policy=custom.AwsCustomResourcePolicy.from_sdk_calls(resources=custom.AwsCustomResourcePolicy.ANY_RESOURCE),
            role=self.role,
            function_name="CreateEpFn"
        )
        endpoint.node.add_dependency(mycert)
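
The cert_gen helper called above is not shown in this snippet. A minimal sketch of a self-signed certificate generator using pyOpenSSL (an assumption; the original may use a different library), returning PEM bytes to match the .decode("utf-8") calls:

    from OpenSSL import crypto

    def cert_gen(self, serialNumber):
        key = crypto.PKey()
        key.generate_key(crypto.TYPE_RSA, 2048)
        cert = crypto.X509()
        cert.get_subject().CN = "emr-eks-endpoint"  # hypothetical common name
        cert.set_serial_number(serialNumber)
        cert.gmtime_adj_notBefore(0)
        cert.gmtime_adj_notAfter(365 * 24 * 60 * 60)  # valid for one year
        cert.set_issuer(cert.get_subject())
        cert.set_pubkey(key)
        cert.sign(key, "sha256")
        return (crypto.dump_certificate(crypto.FILETYPE_PEM, cert),
                crypto.dump_privatekey(crypto.FILETYPE_PEM, key))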
Example n. 16
    def __init__(self, scope: core.Construct, id: str, **kwargs):
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here
        this_dir = path.dirname(__file__)

        handler = lmb.Function(self,
                               'Handler',
                               runtime=lmb.Runtime.PYTHON_3_7,
                               handler='handler.handler',
                               code=lmb.Code.from_asset(
                                   path.join(this_dir, 'lambda')))
        alias = lmb.Alias(self,
                          "HandlerAlias",
                          alias_name="Current",
                          version=handler.current_version)
        gw = apigw.LambdaRestApi(
            self,
            'Gateway',
            description='Endpoint for a simple Lambda-powered web service',
            handler=alias,
            endpoint_types=[EndpointType.REGIONAL])
        failure_alarm = cloudwatch.Alarm(
            self,
            "FailureAlarm",
            alarm_name=self.stack_name + '-' + '500Alarm',
            metric=cloudwatch.Metric(metric_name="5XXError",
                                     namespace="AWS/ApiGateway",
                                     dimensions={
                                         "ApiName": "Gateway",
                                     },
                                     statistic="Sum",
                                     period=core.Duration.minutes(1)),
            threshold=1,
            evaluation_periods=1)

        alarm500topic = sns.Topic(self,
                                  "Alarm500Topic",
                                  topic_name=self.stack_name + '-' +
                                  'Alarm500TopicSNS')
        alarm500topic.add_subscription(
            subscriptions.EmailSubscription("*****@*****.**"))
        failure_alarm.add_alarm_action(cw_actions.SnsAction(alarm500topic))
        codedeploy.LambdaDeploymentGroup(
            self,
            "DeploymentGroup",
            alias=alias,
            deployment_config=codedeploy.LambdaDeploymentConfig.
            CANARY_10_PERCENT_10_MINUTES,
            alarms=[failure_alarm])
        # Create a DynamoDB table

        table_name = self.stack_name + '-' + 'HelloCdkTable'
        table = dynamodb.Table(self,
                               "TestTable",
                               table_name=table_name,
                               partition_key=Attribute(
                                   name="id",
                                   type=dynamodb.AttributeType.STRING))
        table_name_id = cr.PhysicalResourceId.of(table.table_name)
        on_create_action = cr.AwsSdkCall(
            action='putItem',
            service='DynamoDB',
            physical_resource_id=table_name_id,
            parameters={
                'Item': {
                    'id': {
                        'S': 'HOLA_CREATE'
                    },
                    'date': {
                        'S': datetime.today().strftime('%Y-%m-%d')
                    },
                    'epoch': {
                        'N': str(int(time.time()))
                    }
                },
                'TableName': table_name
            })
        on_update_action = cr.AwsSdkCall(
            action='putItem',
            service='DynamoDB',
            physical_resource_id=table_name_id,
            parameters={
                'Item': {
                    'id': {
                        'S': 'HOLA_UPDATE'
                    },
                    'date': {
                        'S': datetime.today().strftime('%Y-%m-%d')
                    },
                    'epoch': {
                        'N': str(int(time.time()))
                    }
                },
                'TableName': table_name
            })
        cr.AwsCustomResource(
            self,
            "TestTableCustomResource",
            on_create=on_create_action,
            on_update=on_update_action,
            policy=cr.AwsCustomResourcePolicy.from_sdk_calls(
                resources=cr.AwsCustomResourcePolicy.ANY_RESOURCE))

        # OUTPUT
        self.url_output = core.CfnOutput(self, 'Url', value=gw.url)
Example n. 17
    def __init__(self, scope: core.Construct, id: str, cluster: eks.Cluster,
                 kafka: msk.CfnCluster, vpc: ec2.Vpc, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Vendor kafka-python into the Lambda asset directory at synth time so
        # the custom resource handler below can import it
        pip.main([
            "install", "--system", "--target", "custom_resources/kafka/lib",
            "kafka-python"
        ])
        arn = cr.AwsCustomResource(
            self,
            'clusterArn',
            policy=cr.AwsCustomResourcePolicy.from_sdk_calls(resources=['*']),
            on_create=cr.AwsSdkCall(
                action='listClusters',
                service='Kafka',
                physical_resource_id=cr.PhysicalResourceId.of(
                    "ClusterNameFilter"),
                parameters={
                    "ClusterNameFilter": kafka.cluster_name,
                    "MaxResults": 1
                },
            ),
        )

        bootstraps = cr.AwsCustomResource(
            self,
            'clusterBootstraps',
            policy=cr.AwsCustomResourcePolicy.from_sdk_calls(resources=["*"]),
            on_create=cr.AwsSdkCall(
                action='getBootstrapBrokers',
                service='Kafka',
                physical_resource_id=cr.PhysicalResourceId.of("ClusterArn"),
                parameters={
                    "ClusterArn":
                    arn.get_response_field("ClusterInfoList.0.ClusterArn")
                },
            ),
        )

        manifests = []
        for namespace in self.node.try_get_context("kubernetes")['namespaces']:
            manifests.append({
                "apiVersion": "v1",
                "kind": "ConfigMap",
                "metadata": {
                    "name": "kafka",
                    "namespace": namespace
                },
                "data": {
                    "bootstrap":
                    bootstraps.get_response_field('BootstrapBrokerStringTls'),
                }
            })
        eks.KubernetesManifest(self,
                               "kafka-config",
                               cluster=cluster,
                               manifest=manifests)

        function = lbd.SingletonFunction(
            self,
            "KafkaConfigFunction",
            uuid="b09329a3-5206-46f7-822f-337da714aeac",
            code=lbd.Code.from_asset("custom_resources/kafka/"),
            handler="config.handler",
            runtime=lbd.Runtime.PYTHON_3_7,
            function_name="kafkaConfig",
            log_retention=logs.RetentionDays.ONE_DAY,
            security_group=ec2.SecurityGroup.from_security_group_id(
                self, "lambdaKafkaVPC", vpc.vpc_default_security_group),
            timeout=core.Duration.seconds(30),
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(one_per_az=True))

        provider = cr.Provider(self,
                               "KafkaConfigProvider",
                               on_event_handler=function,
                               log_retention=logs.RetentionDays.ONE_DAY)

        core.CustomResource(
            self,
            "KafkaLoadTopic",
            service_token=provider.service_token,
            properties={
                "bootstrap":
                bootstraps.get_response_field('BootstrapBrokerStringTls'),
                "topic":
                "load",
                "partitions":
                150,
                "replicas":
                1
            })

        core.CustomResource(
            self,
            "KafkaGenerateTopic",
            service_token=provider.service_token,
            properties={
                "bootstrap":
                bootstraps.get_response_field('BootstrapBrokerStringTls'),
                "topic":
                "generate",
                "partitions":
                200,
                "replicas":
                1
            })
Example n. 18
    def __init__(
            self,
            scope: core.Construct,
            id: str,
            iam_policy: cr.AwsCustomResourcePolicy,
            group_name: str,
            username: str,
            identity_region: str,
            **kwargs):

        super().__init__(scope, id, **kwargs)

        aws_account_id = core.Aws.ACCOUNT_ID
        group_physical_id = id + group_name

        quicksight_group = cr.AwsCustomResource(self, 'QuickSightGroup',
                                                on_create={
                                                    "service": "QuickSight",
                                                    "action": "createGroup",
                                                    "parameters": {
                                                        "AwsAccountId": aws_account_id,
                                                        "GroupName": group_name,
                                                        "Namespace": "default",
                                                        "Description": "Group providing access to QuickSight resources created for the Analytics Reference Architecture"
                                                    },
                                                    "physical_resource_id": cr.PhysicalResourceId.of(group_physical_id),
                                                    "region": identity_region},
                                                on_delete={
                                                    "service": "QuickSight",
                                                    "action": "deleteGroup",
                                                    "parameters": {
                                                        "AwsAccountId": aws_account_id,
                                                        "GroupName": group_name,
                                                        "Namespace": "default",
                                                    },
                                                    "physical_resource_id": cr.PhysicalResourceId.of(group_physical_id),
                                                    "region": identity_region},
                                                policy=iam_policy
                                                )

        self.__group_arn = quicksight_group.get_response_field("Group.Arn")

        membership_physical_id = id + "groupmembership"

        membership = cr.AwsCustomResource(self, 'QuickSightGroupMembership',
                                          on_create={
                                              "service": "QuickSight",
                                              "action": "createGroupMembership",
                                              "parameters": {
                                                  "AwsAccountId": aws_account_id,
                                                  "GroupName": group_name,
                                                  "Namespace": "default",
                                                  "MemberName": username,
                                              },
                                              "physical_resource_id": cr.PhysicalResourceId.of(membership_physical_id),
                                              "region": identity_region},
                                          on_delete={
                                              "service": "QuickSight",
                                              "action": "deleteGroupMembership",
                                              "parameters": {
                                                  "AwsAccountId": aws_account_id,
                                                  "GroupName": group_name,
                                                  "Namespace": "default",
                                                  "MemberName": username,
                                              },
                                              "physical_resource_id": cr.PhysicalResourceId.of(membership_physical_id),
                                              "region": identity_region},
                                          policy=iam_policy
                                          )

        membership.node.add_dependency(quicksight_group)
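
A hypothetical instantiation of this construct (the class name, group, user, and region below are all assumptions; QuickSight group APIs are typically called in the account's identity region, which may differ from the workload region):

        group = QuickSightGroup(  # hypothetical name for the construct above
            self,
            "AnalyticsGroup",
            iam_policy=cr.AwsCustomResourcePolicy.from_sdk_calls(
                resources=cr.AwsCustomResourcePolicy.ANY_RESOURCE),
            group_name="analytics",
            username="analytics-user",
            identity_region="us-east-1",
        )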
Example n. 19
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        uri = self.account + '.dkr.ecr.' + self.region + '.amazonaws.com'
        appl = 'colorteller'
        buildspec = {
            'version': '0.2',
            'phases': {
                'install': {
                    'commands': ['echo install step']
                },
                'pre_build': {
                    'commands': [
                        'echo logging in to AWS ECR...',
                        '$(aws ecr get-login --no-include-email --region %s)' %
                        self.region
                    ]
                },
                'build': {
                    'commands': [
                        'echo building Docker image...',
                        'cd appmeshdemo/colorapp/%s' % appl,
                        'docker build -t %s:latest .' % appl,
                        'docker tag %s:latest %s/%s:latest' % (appl, uri, appl)
                    ]
                },
                'post_build': {
                    'commands': [
                        'echo Docker image build complete!',
                        'echo push latest Docker images to ECR...',
                        'docker push %s/%s:latest' % (uri, appl)
                    ]
                }
            }
        }

        buildenviron = codebuild.BuildEnvironment(
            privileged=True,
            build_image=codebuild.LinuxBuildImage.UBUNTU_14_04_DOCKER_18_09_0,
            environment_variables={
                'AWS_DEFAULT_REGION':
                codebuild.BuildEnvironmentVariable(value=self.region),
                'AWS_ACCOUNT_ID':
                codebuild.BuildEnvironmentVariable(value=self.account),
                'IMAGE_REPO_NAME':
                codebuild.BuildEnvironmentVariable(value=appl),
                'IMAGE_TAG':
                codebuild.BuildEnvironmentVariable(value='latest')
            })

        proj = codebuild.Project(
            self,
            appl,
            build_spec=codebuild.BuildSpec.from_object(buildspec),
            environment=buildenviron)
        call = custom.AwsSdkCall(
            service='CodeBuild',
            action='startBuild',
            parameters={'projectName': proj.project_name},
            # physical_resource_id must be a PhysicalResourceId, not a string
            physical_resource_id=custom.PhysicalResourceId.of(
                'Custom%s' % proj.project_name))

        custom.AwsCustomResource(
            self,
            'CustomCodeBuild',
            on_create=call,
            on_update=call,
            # TODO scope this policy down
            policy=custom.AwsCustomResourcePolicy.from_sdk_calls(
                resources=custom.AwsCustomResourcePolicy.ANY_RESOURCE))
Example n. 20
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        service_control_policy_string: str,
        description: str,
        name: str,
    ) -> None:
        super().__init__(scope, id)

        POLICY_ID_LOOKUP = "Policy.PolicySummary.Id"

        # https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/Organizations.html
        # https://docs.aws.amazon.com/cdk/api/latest/docs/custom-resources-readme.html#physical-resource-id-parameter
        on_create_policy = cr.AwsSdkCall(
            action="createPolicy",
            service="Organizations",
            physical_resource_id=cr.PhysicalResourceId.from_response(POLICY_ID_LOOKUP),
            parameters={
                "Content": service_control_policy_string,
                "Description": description,
                "Name": name,
                "Type": "SERVICE_CONTROL_POLICY",
            },
            output_path=POLICY_ID_LOOKUP,
        )

        on_update_policy = cr.AwsSdkCall(
            action="updatePolicy",
            service="Organizations",
            physical_resource_id=cr.PhysicalResourceId.from_response(POLICY_ID_LOOKUP),
            parameters={
                "Content": service_control_policy_string,
                "Description": description,
                "Name": name,
                "PolicyId": cr.PhysicalResourceIdReference(),
            },
            output_path=POLICY_ID_LOOKUP,
        )

        on_delete_policy = cr.AwsSdkCall(
            action="deletePolicy",
            service="Organizations",
            parameters={
                "PolicyId": cr.PhysicalResourceIdReference(),
            },
        )

        policy = cr.AwsCustomResourcePolicy.from_sdk_calls(
            resources=cr.AwsCustomResourcePolicy.ANY_RESOURCE
        )
        scp_create = cr.AwsCustomResource(
            self,
            "ServiceControlPolicyCreate",
            install_latest_aws_sdk=True,
            policy=policy,
            on_create=on_create_policy,
            on_update=on_update_policy,
            on_delete=on_delete_policy,
            resource_type="Custom::ServiceControlPolicy",
        )

        self.policy_id = scp_create.get_response_field(POLICY_ID_LOOKUP)
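
A possible follow-up, not part of the original: attaching the created SCP to a target with a second SDK call (attachPolicy is the corresponding Organizations action; the TargetId is a placeholder):

        cr.AwsCustomResource(
            self,
            "ServiceControlPolicyAttach",
            policy=policy,
            on_create=cr.AwsSdkCall(
                action="attachPolicy",
                service="Organizations",
                physical_resource_id=cr.PhysicalResourceId.of(
                    "ServiceControlPolicyAttach"),
                parameters={
                    "PolicyId": scp_create.get_response_field(POLICY_ID_LOOKUP),
                    "TargetId": "ou-placeholder",  # hypothetical target OU
                },
            ),
        )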
Example n. 21
    def __init__(
        self,
        scope: cdk.Construct,
        id: str,
        consoleme_alb: lb.ApplicationLoadBalancer,
        **kwargs
    ) -> None:
        super().__init__(scope, id, **kwargs)

        hosted_zone = route53.PublicHostedZone.from_hosted_zone_attributes(
            self,
            "HostedZone",
            hosted_zone_id=HOSTED_ZONE_ID,
            zone_name=HOSTED_ZONE_NAME,
        )

        route53_record = route53.ARecord(
            self,
            "LBRecord",
            zone=hosted_zone,
            record_name=APPLICATION_PREFIX,
            target=route53.RecordTarget(
                alias_target=(route53_targets.LoadBalancerTarget(consoleme_alb))
            ),
        )

        verify_ses_identity = cr.AwsCustomResource(
            self,
            "VerifySESIdentityResource",
            policy=cr.AwsCustomResourcePolicy.from_statements(
                statements=[
                    iam.PolicyStatement(
                        effect=iam.Effect.ALLOW,
                        actions=["ses:VerifyDomainIdentity", "ses:DeleteIdentity"],
                        resources=["*"],
                    )
                ]
            ),
            on_create=cr.AwsSdkCall(
                service="SES",
                action="verifyDomainIdentity",
                parameters={"Domain": route53_record.domain_name},
                physical_resource_id=cr.PhysicalResourceId.from_response(
                    "VerificationToken"
                ),
            ),
            on_delete=cr.AwsSdkCall(
                service="SES",
                action="deleteIdentity",
                parameters={"Identity": route53_record.domain_name},
            ),
            install_latest_aws_sdk=True,
            log_retention=logs.RetentionDays.ONE_WEEK,
        )

        add_ses_dkim = cr.AwsCustomResource(
            self,
            "VerifySESDKIMResource",
            policy=cr.AwsCustomResourcePolicy.from_statements(
                statements=[
                    iam.PolicyStatement(
                        effect=iam.Effect.ALLOW,
                        actions=["ses:VerifyDomainDkim"],
                        resources=["*"],
                    )
                ]
            ),
            on_create=cr.AwsSdkCall(
                service="SES",
                action="verifyDomainDkim",
                parameters={"Domain": route53_record.domain_name},
                physical_resource_id=cr.PhysicalResourceId.of(
                    HOSTED_ZONE_ID + "VerifyDomainDKIM"
                ),
            ),
            install_latest_aws_sdk=True,
            log_retention=logs.RetentionDays.ONE_WEEK,
        )

        add_ses_dkim.node.add_dependency(verify_ses_identity)

        certificate = acm.Certificate(
            self,
            "Certificate",
            domain_name="*." + hosted_zone.zone_name,
            validation=acm.CertificateValidation.from_dns(hosted_zone=hosted_zone),
        )

        self.hosted_zone = hosted_zone
        self.certificate = certificate
        self.route53_record = route53_record
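
verifyDomainDkim returns DkimTokens that normally must be published as CNAME records before DKIM verification completes; this snippet does not create them. A sketch for the first token, assuming the same hosted_zone (the original may handle this elsewhere):

        token = add_ses_dkim.get_response_field("DkimTokens.0")
        route53.CnameRecord(
            self,
            "DkimRecord0",
            zone=hosted_zone,
            record_name=f"{token}._domainkey",
            domain_name=f"{token}.dkim.amazonses.com",
        )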
Example n. 22
    def __init__(
        self,
        scope: cdk.Construct,
        construct_id: str,
        vpc: ec2.Vpc,
        domain: sagemaker.CfnDomain,
        **kwargs,
    ) -> None:
        super().__init__(scope, construct_id, **kwargs)

        studio_domain_id = domain.attr_domain_id  # alternatively: cdk.Fn.import_value("StudioDomainId")

        # Get the security group associated with the EFS volume managed by SageMaker Studio
        get_parameter = cr.AwsCustomResource(
            self,
            "GetEfsSgId",
            on_update={  # will also be called for a CREATE event
                "service": "EC2",
                "action": "describeSecurityGroups",
                "parameters": {
                    "Filters": [
                        {"Name": "vpc-id", "Values": [vpc.vpc_id]},
                        {
                            "Name": "group-name",
                            "Values": [
                                f"security-group-for-inbound-nfs-{studio_domain_id}"
                            ],
                        },
                    ]
                },
                "physical_resource_id": cr.PhysicalResourceId.of("GetEfsSgId"),
            },
            policy=cr.AwsCustomResourcePolicy.from_sdk_calls(
                resources=cr.AwsCustomResourcePolicy.ANY_RESOURCE
            ),
        )
        sg_id = get_parameter.get_response_field("SecurityGroups.0.GroupId")
        sg_efs = ec2.SecurityGroup.from_security_group_id(
            self, "SG", security_group_id=sg_id)

        # We can now retrieve a handle to the EFS volume
        StudioDomainEfsId = cdk.Fn.import_value("StudioDomainEfsId")
        studio_efs = efs.FileSystem.from_file_system_attributes(
            self,
            "StudioEFS",
            file_system_id=StudioDomainEfsId,
            security_group=sg_efs)

        # Create EFS access point to enable the lambda fn to mount the EFS volume
        efs_ap = efs.AccessPoint(
            self,
            "EfsAccessPoint",
            file_system=studio_efs,
            posix_user=efs.PosixUser(gid="0", uid="0"),
        )

        # Function that takes care of setting up the user environment
        self.lambda_fn = lambda_python.PythonFunction(
            self,
            "UserSetupLambdaFn",
            entry="populate_git_fn",
            index="populate_from_git.py",
            handler="on_event",
            vpc=vpc,
            layers=[
                lambda_.LayerVersion.from_layer_version_arn(
                    self,
                    "GitLayer",
                    layer_version_arn=
                    f"arn:aws:lambda:{self.region}:553035198032:layer:git-lambda2:8",
                ),
            ],
            filesystem=lambda_.FileSystem.from_efs_access_point(
                efs_ap, "/mnt/efs"),
            timeout=cdk.Duration.seconds(300),
            initial_policy=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        "sagemaker:DescribeUserProfile",
                    ],
                    resources=["*"],
                )
            ],
        )

        provider = cr.Provider(
            self,
            "Provider",
            on_event_handler=self.lambda_fn,
        )

        cdk.CfnOutput(
            self,
            "StudioUserProviderToken",
            value=provider.service_token,
            description="StudioUserProviderToken",
            export_name="StudioUserProviderToken",
        )

        self.provider = provider
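
A hypothetical consumer of the provider token exported above, creating one setup resource per Studio user (the property name is an assumption the Lambda would read from the event):

        cdk.CustomResource(
            self,
            "StudioUserSetup",
            service_token=provider.service_token,
            properties={"UserProfileName": "studio-user"},  # hypothetical
        )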