Example #1
    def setup_iot_endpoint_provider(self):
        describe_endpoint_policy = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=["iot:DescribeEndpoint"],
            resources=["*"],
        )

        provider_lambda = aws_lambda.SingletonFunction(
            self,
            "iot_data_ats_endpoint_provider_lambda",
            uuid="iot_data_ats_endpoint_provider_lambda_20200507150213",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=aws_lambda.Code.asset("custom_resources/iot_endpoint"),
            handler="iot_endpoint_provider.on_event",
            description="Returns iot:Data-ATS endpoint for this account",
            current_version_options=aws_lambda.VersionOptions(
                removal_policy=core.RemovalPolicy.DESTROY),
            initial_policy=[describe_endpoint_policy],
        )

        provider = custom_resources.Provider(self,
                                             "iot_data_ats_endpoint_provider",
                                             on_event_handler=provider_lambda)

        iot_endpoint = core.CustomResource(
            self,
            "iot_data_ats_endpoint",
            resource_type="Custom::IotDataAtsEndpoint",
            service_token=provider.service_token,
        )

        endpoint_address = iot_endpoint.get_att("EndpointAddress").to_string()

        self._parameters_to_save["iot_endpoint_address"] = endpoint_address
Example #2
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        on_event = _lambda.Function(
            self,
            'ConnectHandler',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset('lambda'),
            handler='connect_create.handler',
        )

        on_event.add_to_role_policy(
            iam.PolicyStatement(actions=[
                "connect:CreateInstance", "connect:DeleteInstance",
                "ds:CreateAlias", "ds:AuthorizeApplication",
                "ds:UnauthorizeApplication", "ds:CreateIdentityPoolDirectory",
                "ds:CreateDirectory", "ds:DescribeDirectories",
                "ds:CheckAlias", "ds:DeleteDirectory", "iam:AttachRolePolicy",
                "iam:CreateServiceLinkedRole", "iam:PutRolePolicy"
            ],
                                resources=["*"]))

        my_provider = cr.Provider(
            self,
            "MyProvider",
            on_event_handler=on_event,
            #is_complete_handler=is_complete, # optional async "waiter"
            log_retention=logs.RetentionDays.ONE_DAY)

        CustomResource(self,
                       "Resource1",
                       service_token=my_provider.service_token)
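
The Provider framework invokes the handler with a RequestType of Create, Update, or Delete. A hedged sketch of what lambda/connect_create.handler might contain, given the connect:CreateInstance/DeleteInstance permissions above (the instance settings are illustrative):

import boto3

connect = boto3.client("connect")

def handler(event, context):
    request_type = event["RequestType"]
    if request_type == "Create":
        instance = connect.create_instance(
            IdentityManagementType="CONNECT_MANAGED",  # illustrative choice
            InboundCallsEnabled=True,
            OutboundCallsEnabled=True)
        # The instance id becomes the resource's physical id, so Delete can find it
        return {"PhysicalResourceId": instance["Id"]}
    if request_type == "Delete":
        connect.delete_instance(InstanceId=event["PhysicalResourceId"])
    return {}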
Example #3
    def wrapper(*args, **kwargs):

      hashed = hashlib.md5(bytes(json.dumps(args) + json.dumps(kwargs), 'UTF-8'))
      hashi = hashed.hexdigest()[:9]
      function_name = func.__name__ + hashi

      remote = lamb.Function(self._scope, f'LambdaFunction-{function_name}',
        code=lamb.Code.from_inline(self._create_function_code(func)),
        runtime=lamb.Runtime.PYTHON_3_7,
        handler='index.handler',
        timeout=cdk.Duration.minutes(15),
        memory_size=256,
      )

      provider = cr.Provider(self._scope, f'CustomResourceProvider-{function_name}', on_event_handler=remote)

      # stack = cdk.Stack.of(self._scope)
      resource = cdk.CustomResource(self._scope, f'CustomResource-{function_name}',
        service_token=provider.service_token,
        properties={

          # avoid any CFN type conversion quirks by serializing
          # on output and deserializing on input
          'args': json.dumps(list(args)),
          'kwargs': json.dumps(dict(kwargs))
        })

      return resource.get_att_string('Value')
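
_create_function_code is not shown here. One plausible sketch of that method, assuming it captures the decorated function's source with inspect and wraps it in a handler that mirrors the args/kwargs properties and the 'Value' attribute read above (everything below is guesswork under those assumptions):

import inspect
import textwrap

def _create_function_code(self, func):
    # Grab the decorated function's source, minus decorator lines, so it
    # can run standalone inside the Lambda's inline code
    source = textwrap.dedent(inspect.getsource(func))
    body = "\n".join(line for line in source.splitlines()
                     if not line.lstrip().startswith("@"))
    handler = textwrap.dedent(f"""
        import json

        def handler(event, context):
            if event["RequestType"] == "Delete":
                return {{}}
            props = event["ResourceProperties"]
            args = json.loads(props["args"])
            kwargs = json.loads(props["kwargs"])
            # assumes the wrapped function returns a string-compatible value
            return {{"Data": {{"Value": {func.__name__}(*args, **kwargs)}}}}
    """)
    return body + "\n" + handler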
Example #4
    def create_custom_authorizer_signing_key_generic(
            self, unique_id, description, token_value) -> core.CustomResource:
        """
        Uses a Lambda to create an asymmetric key pair, since neither CFn nor CDK support that as of
        this writing (2020-05-09)
        https://github.com/aws-cloudformation/aws-cloudformation-coverage-roadmap/issues/337

        After creating the key, it signs the token value using the private key, and stores all of
        `token_value`, `token_value`'s signature, and the public key in the stack's parameter store.

        :return: the CustomResource for the signing key
        """
        create_authorizer_policy = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "kms:CreateKey", "kms:GetPublicKey", "kms:ScheduleKeyDeletion",
                "kms:Sign"
            ],
            resources=["*"],
        )
        provider_lambda = aws_lambda.SingletonFunction(
            self,
            f"iot_custom_authorizer_key_provider_lambda_{unique_id}",
            uuid=f"iot_custom_authorizer_key_provider_lambda_20200507150213_{unique_id}",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=aws_lambda.Code.asset(
                "custom_resources/iot_custom_authorizer_key_provider"),
            handler="iot_custom_authorizer_key_provider.on_event",
            description=description,
            current_version_options=aws_lambda.VersionOptions(
                removal_policy=core.RemovalPolicy.DESTROY),
            initial_policy=[create_authorizer_policy],
        )

        provider = custom_resources.Provider(
            self,
            f"iot_custom_authorizer_key_provider_{unique_id}",
            on_event_handler=provider_lambda,
        )

        iot_custom_authorizer_key = core.CustomResource(
            self,
            f"iot_custom_authorizer_key_{unique_id}",
            resource_type="Custom::IoTCustomAuthorizer",
            service_token=provider.service_token,
            properties={"token_value": token_value},
        )

        return iot_custom_authorizer_key
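
A sketch of what iot_custom_authorizer_key_provider.on_event might do with the KMS permissions granted above; the attribute names match the get_att calls made elsewhere in this stack, but the key spec, signing algorithm, and encodings are assumptions:

import base64
import boto3

kms = boto3.client("kms")

def on_event(event, context):
    if event["RequestType"] == "Delete":
        kms.schedule_key_deletion(KeyId=event["PhysicalResourceId"],
                                  PendingWindowInDays=7)
        return {}
    token_value = event["ResourceProperties"]["token_value"]
    # Create an asymmetric signing CMK and sign the token with it
    key_id = kms.create_key(KeySpec="RSA_2048",
                            KeyUsage="SIGN_VERIFY")["KeyMetadata"]["KeyId"]
    signature = kms.sign(
        KeyId=key_id,
        Message=token_value.encode(),
        SigningAlgorithm="RSASSA_PKCS1_V1_5_SHA_256")["Signature"]
    public_key = kms.get_public_key(KeyId=key_id)["PublicKey"]
    return {
        "PhysicalResourceId": key_id,
        "Data": {
            "custom_authorizer_token_signature":
                base64.b64encode(signature).decode(),
            "custom_authorizer_public_key":
                base64.b64encode(public_key).decode(),
        },
    }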
Example #5
    def __init__(self, scope: core.Construct, id: str, bucket_name: str,
                 uuid: str, **kwargs):
        super().__init__(scope, id, **kwargs)

        bucket_storage = _s3.LifecycleRule(transitions=[
            _s3.Transition(storage_class=_s3.StorageClass.INTELLIGENT_TIERING,
                           transition_after=core.Duration.days(1))
        ])

        self.__bucket = _s3.Bucket(self,
                                   'S3Bucket',
                                   bucket_name=bucket_name,
                                   removal_policy=core.RemovalPolicy.DESTROY,
                                   encryption=_s3.BucketEncryption.KMS_MANAGED,
                                   lifecycle_rules=[bucket_storage])

        with open('common/common_cdk/lambda/empty_bucket.py', 'r') as f:
            lambda_source = f.read()

        empty_bucket_lambda = _lambda.SingletonFunction(
            self,
            'EmptyBucketLambda',
            uuid=uuid,
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.inline(lambda_source),
            handler='index.handler',
            timeout=core.Duration.minutes(15))

        empty_bucket_lambda.role.add_to_policy(
            _iam.PolicyStatement(actions=[
                's3:DeleteObject', 's3:DeleteObjectVersion',
                's3:ListBucketVersions', 's3:ListBucket'
            ],
                                 resources=[
                                     self.__bucket.bucket_arn + '/*',
                                     self.__bucket.bucket_arn
                                 ]))

        empty_bucket_lambda_provider = _custom_resources.Provider(
            self,
            'EmptyBucketLambdaProvider',
            on_event_handler=empty_bucket_lambda)

        custom_resource = core.CustomResource(
            self,
            'EmptyBucketCustomResource',
            service_token=empty_bucket_lambda_provider.service_token,
            properties={"bucket_name": self.__bucket.bucket_name})

        custom_resource.node.add_dependency(self.__bucket)
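
A minimal sketch of the empty_bucket.py handler read from disk above, assuming it only needs to act on Delete and relies on the s3:Delete*/List* permissions granted to the function:

import boto3

def handler(event, context):
    if event["RequestType"] == "Delete":
        bucket_name = event["ResourceProperties"]["bucket_name"]
        bucket = boto3.resource("s3").Bucket(bucket_name)
        # Remove every object version (and delete markers) so CloudFormation
        # can delete the now-empty bucket
        bucket.object_versions.delete()
    return {}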
Example #6
    def __init__(self, scope: cdk.Construct, construct_id: str, stage: str,
                 table: _dynamo.Table, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        initial_data_role = iam.Role(
            self,
            "InitialDataRole",
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AWSLambdaBasicExecutionRole")
            ],
        )

        initial_data_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AWSLambdaInvocation-DynamoDB"))
        initial_data_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonDynamoDBFullAccess"))

        on_event = _lambda.Function(
            self,
            "DataHandler",
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.asset("lambda"),
            handler="initial_data.lambda_handler",
            timeout=cdk.Duration.minutes(5),
            environment={
                "TABLE_NAME": table.table_name,
                "STAGE": stage
            },
        )

        table.grant_full_access(on_event)

        initial_data_provider = _resources.Provider(
            self,
            "InitialDataProvider",
            on_event_handler=on_event,
            log_retention=logs.RetentionDays.ONE_DAY,
            role=initial_data_role,
        )

        cdk.CustomResource(
            self,
            "InitialDataResource",
            service_token=initial_data_provider.service_token,
        )
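
The seeding logic in lambda/initial_data.py is not shown; a minimal sketch, assuming it writes on Create only (the item shape is purely illustrative):

import os
import boto3

def lambda_handler(event, context):
    if event["RequestType"] == "Create":
        table = boto3.resource("dynamodb").Table(os.environ["TABLE_NAME"])
        # hypothetical seed item; the real key schema is not shown in the example
        table.put_item(Item={"pk": "config", "stage": os.environ["STAGE"]})
    return {}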
Example #7
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        policy_id: str,
        account_targets: List[str] = None,
        organization_unit_targets: List[str] = None,
    ) -> None:
        super().__init__(scope, id)

        on_event = _lambda.Function(
            self,
            "ON-SCP-ATTACHMENT-EVENT",
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler="app.on_event",
            timeout=core.Duration.seconds(600),
            memory_size=128,
            code=_lambda.Code.asset(os.path.join(dirname,
                                                 "attachment_lambda")),
            description="Service control policy attachment resource",
        )

        on_event.add_to_role_policy(
            iam.PolicyStatement(
                actions=[
                    "organizations:CreatePolicy",
                    "organizations:DeletePolicy",
                    "organizations:AttachPolicy",
                    "organizations:DetachPolicy",
                ],
                resources=["*"],
            ))

        attachment_provider = cr.Provider(
            self,
            "ON_EVENT_CUSTOM_RESOURCE_PROVIDER",
            on_event_handler=on_event,
        )

        CustomResource(
            self,
            "scp-attachment-custom-resource",
            service_token=attachment_provider.service_token,
            properties={
                "PolicyId": policy_id,
                "AccountTargets": account_targets,
                "OrganizationUnitTargets": organization_unit_targets,
            },
        )
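
A hedged sketch of attachment_lambda/app.py, assuming it fans the policy out to every target using the organizations:AttachPolicy/DetachPolicy permissions granted above:

import boto3

orgs = boto3.client("organizations")

def on_event(event, context):
    props = event["ResourceProperties"]
    targets = (props.get("AccountTargets") or []) + \
              (props.get("OrganizationUnitTargets") or [])
    if event["RequestType"] == "Create":
        for target in targets:
            orgs.attach_policy(PolicyId=props["PolicyId"], TargetId=target)
    elif event["RequestType"] == "Delete":
        for target in targets:
            orgs.detach_policy(PolicyId=props["PolicyId"], TargetId=target)
    return {}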
Example #8
    def __init__(self, scope: core.Construct, id: str, f_lambda,
                 bot_locale) -> None:
        super().__init__(scope, id)

        on_event = _lambda.Function(
            self,
            "ON-EVENT",
            runtime=_lambda.Runtime.PYTHON_3_6,
            handler="lex-bot-provider.on_event",
            timeout=core.Duration.seconds(60),
            memory_size=256,
            code=_lambda.Code.asset("./custom_resource_lex_bot/lambda"),
            description='Handles custom resource events',
            environment={
                'LAMBDA_ARN_FULLFILL': f_lambda.function_arn,
                'BOT_LOCALE': bot_locale
            })

        is_complete = _lambda.Function(
            self,
            "IS-COMPLETE",
            runtime=_lambda.Runtime.PYTHON_3_6,
            handler="lex-bot-provider.is_complete",
            timeout=core.Duration.seconds(60),
            memory_size=256,
            code=_lambda.Code.asset("./custom_resource_lex_bot/lambda"),
            description='IS COMPLETE HANDLER')

        on_event.add_to_role_policy(
            iam.PolicyStatement(actions=["lex:*"], resources=['*']))
        is_complete.add_to_role_policy(
            iam.PolicyStatement(actions=["lex:*"], resources=['*']))

        my_provider = cr.Provider(
            self,
            "ON_EVENT_CUSTOM_RESOURCE_PROVIDER",
            on_event_handler=on_event,
            is_complete_handler=is_complete,  # optional async "waiter"
            log_retention=logs.RetentionDays.ONE_DAY)

        CustomResource(self,
                       "lexbotcustom",
                       service_token=my_provider.service_token)
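
The is_complete handler implements the Provider framework's waiter contract: it is polled until it returns {"IsComplete": True}. A sketch of what lex-bot-provider.is_complete might look like (the Lex status check is illustrative):

import boto3

lex = boto3.client("lex-models")

def is_complete(event, context):
    if event["RequestType"] == "Delete":
        return {"IsComplete": True}
    # assumes on_event returned the bot name as the physical resource id
    bot = lex.get_bot(name=event["PhysicalResourceId"],
                      versionOrAlias="$LATEST")
    return {"IsComplete": bot["status"] in ("READY", "READY_BASIC_TESTING")}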
Example #9
    def __init__(self, scope: core.Construct, id: str, secgroup_name: str, **kwargs):
        super().__init__(scope, id, **kwargs)

        with open('common/common_cdk/lambda/empty_security_group.py', 'r') as f:
            lambda_source = f.read()

        # lambda utils to empty security group before deletion
        empty_secgroup_lambda = _lambda.SingletonFunction(self, 'EmptySecurityGroupLambda',
                                                          uuid="dfs3k8730-4ee1-11e8-9c2d-fdfs65dfsc",
                                                          runtime=_lambda.Runtime.PYTHON_3_7,
                                                          code=_lambda.Code.inline(lambda_source),
                                                          handler='index.handler',
                                                          function_name='ara-auto-empty-secgroup'
                                                          )

        empty_secgroup_lambda.add_to_role_policy(
            _iam.PolicyStatement(
                actions=[
                    'ec2:RevokeSecurityGroupIngress',
                    'ec2:RevokeSecurityGroupEgress'
                ],
                resources=['arn:aws:ec2:' + core.Aws.REGION + ':'
                           + core.Aws.ACCOUNT_ID + ':security-group/' + secgroup_name]
            )
        )

        empty_secgroup_lambda_provider = _custom_resources.Provider(
            self, 'EmptyBucketLambdaProvider',
            on_event_handler=empty_secgroup_lambda
        )

        core.CustomResource(
            self, 'EmptyBucketCustomResource',
            service_token=empty_secgroup_lambda_provider.service_token,
            properties={
                "secgroup_name": secgroup_name
            }
        )
Example #10
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create bucket and upload scripts
        bucket = s3.Bucket(self, "ScriptBucket")

        self.script_bucket = bucket

        s3deploy.BucketDeployment(self, "UploadScriptsToBucket",
            sources=[s3deploy.Source.asset(os.path.join(dirname, "scripts"))],
            destination_bucket=bucket
        )

        # Greengrass Core Thing policy
        greengrass_core_policy = iot.CfnPolicy(self,
            'GreenGrassCorePolicy',
            policy_name='greengrass-demo-policy',
            policy_document={
                "Version": "2012-10-17",
                "Statement": [
                    {
                        "Effect": "Allow",
                        "Action": [
                            "iot:Publish",
                            "iot:Subscribe",
                            "iot:Connect",
                            "iot:Receive"
                        ],
                        "Resource": [
                            "*"
                        ]
                    },
                    {
                        "Effect": "Allow",
                        "Action": [
                            "iot:GetThingShadow",
                            "iot:UpdateThingShadow",
                            "iot:DeleteThingShadow"
                        ],
                        "Resource": [
                            "*"
                        ]
                    },
                    {
                        "Effect": "Allow",
                        "Action": [
                            "greengrass:*"
                        ],
                        "Resource": [
                            "*"
                        ]
                    }
                ]
            }
        )

        self.core_policy_name = greengrass_core_policy.policy_name

        # Create a Greengrass group role
        greengrass_group_role = iam.Role(self, "GroupRole",
            assumed_by=iam.ServicePrincipal("greengrass.amazonaws.com")
        )
        greengrass_group_role.add_to_policy(iam.PolicyStatement(
            resources=["arn:aws:logs:*:*:*"],
            actions=[
                "logs:CreateLogGroup",
                "logs:CreateLogStream",
                "logs:PutLogEvents"
            ]
        ))
        greengrass_group_role.add_to_policy(iam.PolicyStatement(
            resources=["*"],
            actions=["iot:*"]
        ))
     
        self.greengrass_group_role_arn = greengrass_group_role.role_arn
        
        # A custom resource to verify that there is a service role for greengrass on the account 
        greengrass_mgmt_function = awslambda.SingletonFunction(
            self,
            "MgmttHandler",
            uuid="58854ea2-0624-4ca5-b600-fa88d4b9164e",
            runtime=awslambda.Runtime.PYTHON_3_7,
            code=awslambda.Code.asset("custom_resources"),
            handler="greengrassmgmt.handler",
        )

        greengrass_mgmt_function.add_to_role_policy(
            iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        'greengrass:*',
                        'iot:*',
                        'iam:CreateRole',
                        'iam:AttachRolePolicy',
                        'iam:PassRole'
                    ],
                    resources=['*']
                )
        )

        greengrass_mgmt_provider = cust_resource.Provider(self, "MgmtProvider",
            on_event_handler=greengrass_mgmt_function
        )

        core.CustomResource(self, "MgmtCustResource", 
            service_token=greengrass_mgmt_provider.service_token
        )
Example #11
    def setup_custom_authorizer(self):
        # These values are used in the custom authorizer setup, and exported to Parameter Store
        # for use by integration tests
        custom_authorizer_name = "iot_custom_authorizer"
        self._parameters_to_save[
            "custom_authorizer_name"] = custom_authorizer_name

        # Note: "key" is a bit overloaded here. In the context of the custom authorizer, "key name"
        # refers to the HTTP header field that the custom authorizer looks for a token value in.
        #
        # In the case of the custom authorizer key provider, the "key" is the KMS asymmetric CMK
        # used to sign the token value passed in the `token_key_name` header. In order to keep the
        # terminology consistent between client integ tests that are expecting to pass something for
        # a "key name" field, we'll let the ambiguity stand.
        token_key_name = "iot_custom_authorizer_token"
        self._parameters_to_save[
            "custom_authorizer_token_key_name"] = token_key_name

        token_value = "allow"
        self._parameters_to_save["custom_authorizer_token_value"] = token_value

        iot_custom_authorizer_key_resource = self.create_custom_authorizer_signing_key_generic(
            "1",
            "Manages an asymmetric CMK and token signature for iot custom authorizer.",
            token_value,
        )

        custom_authorizer_token_signature = iot_custom_authorizer_key_resource.get_att(
            "custom_authorizer_token_signature").to_string()
        self._parameters_to_save[
            "custom_authorizer_token_signature"] = custom_authorizer_token_signature

        authorizer_function_arn = self.setup_custom_authorizer_function(
            "1",
            "custom_resources/iot_custom_authorizer_function",
            "iot_custom_authorizer.handler",
            "Sample custom authorizer that allows or denies based on 'token' value",
            {},
            self.region,
        )

        create_authorizer_policy = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "iot:CreateAuthorizer", "iot:UpdateAuthorizer",
                "iot:DeleteAuthorizer"
            ],
            resources=["*"],
        )
        provider_lambda = aws_lambda.SingletonFunction(
            self,
            "iot_custom_authorizer_provider_lambda",
            uuid=self.custom_auth_user_pass_uuid,
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=aws_lambda.Code.asset(
                "custom_resources/iot_custom_authorizer_provider"),
            handler="iot_custom_authorizer_provider.on_event",
            description="Sets up an IoT custom authorizer",
            current_version_options=aws_lambda.VersionOptions(
                removal_policy=core.RemovalPolicy.DESTROY),
            initial_policy=[create_authorizer_policy],
        )

        provider = custom_resources.Provider(self,
                                             "iot_custom_authorizer_provider",
                                             on_event_handler=provider_lambda)

        public_key = iot_custom_authorizer_key_resource.get_att(
            "custom_authorizer_public_key").to_string()

        core.CustomResource(
            self,
            "iot_custom_authorizer",
            resource_type="Custom::IoTCustomAuthorizer",
            service_token=provider.service_token,
            properties={
                "authorizer_function_arn": authorizer_function_arn,
                "authorizer_name": custom_authorizer_name,
                "public_key": public_key,
                "token_key_name": token_key_name,
            },
        )
Example #12
    def setup_custom_authorizer_user_pass(self):
        custom_authorizer_name = self.custom_auth_user_pass_default_authorizer_name
        self._parameters_to_save[
            "custom_authorizer_user_pass_name"] = custom_authorizer_name
        token_key_name = "IoTTokenKeyName"
        self._parameters_to_save[
            "custom_authorizer_user_pass_token_key_name"] = token_key_name
        token_value = "allow"
        self._parameters_to_save[
            "custom_authorizer_user_pass_token_value"] = token_value
        self._parameters_to_save[
            "custom_authorizer_user_pass_username"] = self.custom_auth_user_pass_username
        self._parameters_to_save[
            "custom_authorizer_user_pass_password"] = self.custom_auth_user_pass_password

        iot_custom_authorizer_key_resource = self.create_custom_authorizer_signing_key_generic(
            "2",
            "Manages an asymmetric CMK and token signature for iot custom authorizer with username and password.",
            token_value,
        )

        custom_authorizer_token_signature = iot_custom_authorizer_key_resource.get_att(
            "custom_authorizer_token_signature").to_string()
        self._parameters_to_save[
            "custom_authorizer_user_pass_token_signature"] = custom_authorizer_token_signature

        # Force region to 'us-east-1' because enhanced custom authorizers are only
        # available there (TODO: remove once they are available in all regions)
        authorizer_function_arn = self.setup_custom_authorizer_function(
            "2",
            "custom_resources/iot_custom_authorizer_user_pass_function",
            "iot_custom_authorizer_user_pass.handler",
            "Sample custom authorizer that allows or denies based on username and password",
            {
                "custom_auth_user_pass_username":
                self.custom_auth_user_pass_username,
                "custom_auth_user_pass_password":
                self.custom_auth_user_pass_password
            },
            "us-east-1",
        )
        create_authorizer_policy = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "iot:CreateAuthorizer",
                "iot:UpdateAuthorizer",
                "iot:DeleteAuthorizer",
                "iot:UpdateDomainConfiguration",
                "iot:CreateDomainConfiguration",
                "iot:DescribeDomainConfiguration",
                "iot:DeleteDomainConfiguration",
            ],
            resources=["*"],
        )
        provider_lambda = aws_lambda.SingletonFunction(
            self,
            "iot_custom_authorizer_user_pass_provider_lambda",
            uuid="iot_custom_authorizer_user_pass_provider_lambda_20200727123737",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=aws_lambda.Code.asset(
                "custom_resources/iot_custom_authorizer_user_pass_provider"),
            handler="iot_custom_authorizer_user_pass_provider.on_event",
            description="Sets up an IoT custom authorizer for user password & required domain config due to beta status",
            environment={
                "custom_auth_user_pass_uuid":
                self.custom_auth_user_pass_uuid,
                "custom_auth_user_pass_default_authorizer_name":
                self.custom_auth_user_pass_default_authorizer_name,
                "custom_auth_user_pass_domain_configuration_name":
                self.custom_auth_user_pass_domain_configuration_name
            },
            current_version_options=aws_lambda.VersionOptions(
                removal_policy=core.RemovalPolicy.DESTROY),
            initial_policy=[create_authorizer_policy],
        )

        provider = custom_resources.Provider(
            self,
            "iot_custom_authorizer_user_pass_provider",
            on_event_handler=provider_lambda)

        public_key = iot_custom_authorizer_key_resource.get_att(
            "custom_authorizer_public_key").to_string()

        iot_endpoint = core.CustomResource(
            self,
            "iot_custom_authorizer_user_pass",
            resource_type="Custom::IoTCustomAuthorizer",
            service_token=provider.service_token,
            properties={
                "authorizer_function_arn": authorizer_function_arn,
                "authorizer_name": custom_authorizer_name,
                "public_key": public_key,
                "token_key_name": token_key_name,
            },
        )
        endpoint_address = iot_endpoint.get_att(
            "BetaEndpointAddress").to_string()
        self._parameters_to_save[
            "iot_beta_endpoint_address"] = endpoint_address
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Add the VPC connection arn as an input parameter
        vpc_conn_arn = core.CfnParameter(
            self,
            "VpcConnectionArn",
            type="String",
            description="The Arn of the VPC connection to use for Redshift.")

        quicksight_group_arn = core.Fn.import_value('ara-QuickSight-Group-Arn')
        secret_arn = core.Fn.import_value('ara-QuickSight-Redshift-Secret-Arn')

        # Create the custom resource policy with the necessary permissions
        iam_policy = cr.AwsCustomResourcePolicy.from_statements([
            iam.PolicyStatement(actions=cfg.CDK_POLICY_ACTIONS,
                                resources=['*']),
        ])

        redshift_datasource_lambda = lambda_.SingletonFunction(
            self,
            id='RedshiftDatasourceLambda',
            uuid='b438edeb-f5dc-486a-ac2d-bc0918b975b8',
            runtime=lambda_.Runtime.PYTHON_3_8,
            code=lambda_.Code.from_asset('dataviz_redshift/lambda'),
            handler='redshift_datasource.handler',
            function_name='ara_redshift_datasource')

        redshift_datasource_lambda.role.add_to_policy(
            iam.PolicyStatement(actions=['secretsmanager:GetSecretValue'],
                                resources=[secret_arn]))

        redshift_datasource_lambda.role.add_to_policy(
            iam.PolicyStatement(actions=[
                'quicksight:CreateDataSource', 'quicksight:DeleteDataSource'
            ],
                                resources=['*']))

        lambda_provider = cr.Provider(
            self,
            id='LambdaProvider',
            on_event_handler=redshift_datasource_lambda)

        responseLamb = core.CustomResource(
            self,
            'RedshiftDatasourceResource',
            service_token=lambda_provider.service_token,
            properties={
                'Secret_arn': secret_arn,
                'Datasource_name': cfg.REDSHIFT_DATASOURCE_NAME,
                'Aws_account_id': self.account,
                'Quicksight_group_arn': quicksight_group_arn,
                'Datasource_actions': cfg.DATASOURCE_ACTIONS,
                'Vpc_conn_arn': vpc_conn_arn.value_as_string
            })

        redshift_datasource_arn = responseLamb.get_att_string('datasource_arn')

        core.CfnOutput(self,
                       "RedshiftDataSourceArn",
                       description="Redshift Data Source Arn",
                       value=redshift_datasource_arn)

        # Create a Redshift dataset with custom SQL
        redshift_dataset_arn = QuickSightRedshiftDataset(
            self,
            'RedshiftDataset',
            iam_policy=iam_policy,
            quicksight_group_arn=quicksight_group_arn,
            redshift_datasource_arn=redshift_datasource_arn,
            redshift_dataset_name=cfg.REDSHIFT_DATASET_NAME,
            dataset_actions=cfg.DATASET_ACTIONS,
            redshift_custom_sql=cfg.REDSHIFT_CUSTOM_SQL,
            redshift_columns=cfg.REDSHIFT_COLUMNS,
            redshift_data_transformations=cfg.REDSHIFT_DATA_TRANSFORMATIONS
        ).redshift_dataset_arn

        QuickSightRedshiftAnalysis(
            self,
            'RedshiftAnalysis',
            iam_policy=iam_policy,
            quicksight_group_arn=quicksight_group_arn,
            redshift_dataset_arn=redshift_dataset_arn,
            redshift_analysis_name=cfg.REDSHIFT_ANALYSIS_NAME,
            redshift_analysis_template_alias=cfg.REDSHIFT_ANALYSIS_TEMPLATE_ALIAS,
            analysis_actions=cfg.ANALYSIS_ACTIONS)
Example #14
    def __init__(self, scope: cdk.Construct, id: str,
                 cognito_user_pool: cognito.UserPool, s3_bucket_name: str,
                 create_configuration_lambda_role_arn: str,
                 redis: ec.CfnCacheCluster, domain_name: str,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        config_yaml = yaml.load(open("config.yaml"), Loader=yaml.FullLoader)
        spoke_accounts = config_yaml.get("spoke_accounts", [])

        cognito_user_pool_client = cognito.UserPoolClient(
            self,
            "UserPoolClient",
            user_pool=cognito_user_pool,
            generate_secret=True,
            supported_identity_providers=[
                cognito.UserPoolClientIdentityProvider.COGNITO
            ],
            prevent_user_existence_errors=True,
            o_auth=cognito.OAuthSettings(
                callback_urls=[
                    "https://" + domain_name + "/auth",
                    "https://" + domain_name + "/oauth2/idpresponse",
                ],
                logout_urls=["https://" + domain_name + "/logout"],
                flows=cognito.OAuthFlows(authorization_code_grant=True,
                                         implicit_code_grant=True),
                scopes=[cognito.OAuthScope.OPENID, cognito.OAuthScope.EMAIL],
            ),
            auth_flows=cognito.AuthFlow(user_password=True, user_srp=True),
        )

        describe_cognito_user_pool_client = cr.AwsCustomResource(
            self,
            "UserPoolClientIDResource",
            policy=cr.AwsCustomResourcePolicy.from_sdk_calls(
                resources=cr.AwsCustomResourcePolicy.ANY_RESOURCE),
            on_create=cr.AwsSdkCall(
                service="CognitoIdentityServiceProvider",
                action="describeUserPoolClient",
                parameters={
                    "UserPoolId": cognito_user_pool.user_pool_id,
                    "ClientId": cognito_user_pool_client.user_pool_client_id,
                },
                physical_resource_id=cr.PhysicalResourceId.of(
                    cognito_user_pool_client.user_pool_client_id),
            ),
            install_latest_aws_sdk=True,
            log_retention=logs.RetentionDays.ONE_WEEK,
        )

        cognito_user_pool_client_secret = (
            describe_cognito_user_pool_client.get_response_field(
                "UserPoolClient.ClientSecret"))

        imported_create_configuration_lambda_role = iam.Role.from_role_arn(
            self,
            "ImportedCreateConfigurationFileLambdaRole",
            role_arn=create_configuration_lambda_role_arn,
        )

        jwt_secret = config_yaml["jwt_secret"]

        config_secret_dict = {
            "oidc_secrets": {
                "client_id": cognito_user_pool_client.user_pool_client_id,
                "secret": cognito_user_pool_client_secret,
                "client_scope": ["email", "openid"],
            },
            "jwt_secret": jwt_secret,
        }

        config_secret_yaml = yaml.dump(
            config_secret_dict,
            explicit_start=True,
            default_flow_style=False,
        )

        config_secret = cr.AwsCustomResource(
            self,
            "ConfigSecretResource",
            policy=cr.AwsCustomResourcePolicy.from_sdk_calls(
                resources=cr.AwsCustomResourcePolicy.ANY_RESOURCE),
            on_update=cr.AwsSdkCall(
                service="SecretsManager",
                action="updateSecret",
                parameters={
                    "SecretId": CONFIG_SECRET_NAME,
                    "SecretString": config_secret_yaml,
                },
                physical_resource_id=cr.PhysicalResourceId.from_response(
                    "Name"),
            ),
            on_create=cr.AwsSdkCall(
                service="SecretsManager",
                action="createSecret",
                parameters={
                    "Name": CONFIG_SECRET_NAME,
                    "Description":
                    "Sensitive configuration parameters for ConsoleMe",
                    "SecretString": config_secret_yaml,
                },
                physical_resource_id=cr.PhysicalResourceId.from_response(
                    "Name"),
            ),
            on_delete=cr.AwsSdkCall(
                service="SecretsManager",
                action="deleteSecret",
                parameters={
                    "SecretId": CONFIG_SECRET_NAME,
                    "ForceDeleteWithoutRecovery": True,
                },
            ),
            install_latest_aws_sdk=True,
            log_retention=logs.RetentionDays.ONE_WEEK,
        )

        create_configuration_lambda = lambda_.Function(
            self,
            "CreateConfigurationFileLambda",
            code=lambda_.Code.from_asset("resources/create_config_lambda"),
            handler="index.handler",
            timeout=cdk.Duration.seconds(30),
            layers=[create_dependencies_layer(self, "create_config_lambda")],
            runtime=lambda_.Runtime.PYTHON_3_8,
            role=imported_create_configuration_lambda_role,
            environment={
                "DEPLOYMENT_BUCKET": s3_bucket_name,
                "OIDC_METADATA_URL": "https://cognito-idp." + self.region +
                ".amazonaws.com/" + cognito_user_pool.user_pool_id +
                "/.well-known/openid-configuration",
                "REDIS_HOST": redis.attr_redis_endpoint_address,
                "SES_IDENTITY_ARN": "arn:aws:ses:" + self.region + ":" +
                self.account + ":identity/" + domain_name,
                "SUPPORT_CHAT_URL": "https://discord.gg/nQVpNGGkYu",
                "APPLICATION_ADMIN": "consoleme_admin",
                "ACCOUNT_NUMBER": self.account,
                "ISSUER": domain_name,
                "SPOKE_ACCOUNTS": ",".join(spoke_accounts),
                "CONFIG_SECRET_NAME": CONFIG_SECRET_NAME,
            },
        )

        create_configuration_resource_provider = cr.Provider(
            self,
            "CreateConfigurationFileProvider",
            on_event_handler=create_configuration_lambda,
            log_retention=logs.RetentionDays.ONE_WEEK,
        )

        create_configuration_lambda_resource = cdk.CustomResource(
            self,
            "CreateConfigurationFile",
            service_token=create_configuration_resource_provider.service_token,
            removal_policy=cdk.RemovalPolicy.DESTROY,
            properties={"UUID": str(uuid4())},
        )

        create_configuration_lambda_resource.node.add_dependency(config_secret)
Example #15
    def __init__(self, scope: core.Construct, id: str, cluster: eks.Cluster,
                 kafka: msk.CfnCluster, vpc: ec2.Vpc, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        pip.main([
            "install", "--system", "--target", "custom_resources/kafka/lib",
            "kafka-python"
        ])
        arn = cr.AwsCustomResource(
            self,
            'clusterArn',
            policy=cr.AwsCustomResourcePolicy.from_sdk_calls(resources=['*']),
            on_create=cr.AwsSdkCall(
                action='listClusters',
                service='Kafka',
                physical_resource_id=cr.PhysicalResourceId.of(
                    "ClusterNameFilter"),
                parameters={
                    "ClusterNameFilter": kafka.cluster_name,
                    "MaxResults": 1
                },
            ),
        )

        bootstraps = cr.AwsCustomResource(
            self,
            'clusterBootstraps',
            policy=cr.AwsCustomResourcePolicy.from_sdk_calls(resources=["*"]),
            on_create=cr.AwsSdkCall(
                action='getBootstrapBrokers',
                service='Kafka',
                physical_resource_id=cr.PhysicalResourceId.of("ClusterArn"),
                parameters={
                    "ClusterArn":
                    arn.get_response_field("ClusterInfoList.0.ClusterArn")
                },
            ),
        )

        manifests = []
        for namespace in self.node.try_get_context("kubernetes")['namespaces']:
            manifests.append({
                "apiVersion": "v1",
                "kind": "ConfigMap",
                "metadata": {
                    "name": "kafka",
                    "namespace": namespace
                },
                "data": {
                    "bootstrap":
                    bootstraps.get_response_field('BootstrapBrokerStringTls'),
                }
            })
        eks.KubernetesManifest(self,
                               "kafka-config",
                               cluster=cluster,
                               manifest=manifests)

        function = lbd.SingletonFunction(
            self,
            "KafkaConfigFunction",
            uuid="b09329a3-5206-46f7-822f-337da714aeac",
            code=lbd.Code.from_asset("custom_resources/kafka/"),
            handler="config.handler",
            runtime=lbd.Runtime.PYTHON_3_7,
            function_name="kafkaConfig",
            log_retention=logs.RetentionDays.ONE_DAY,
            security_group=ec2.SecurityGroup.from_security_group_id(
                self, "lambdaKafkaVPC", vpc.vpc_default_security_group),
            timeout=core.Duration.seconds(30),
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(one_per_az=True))

        provider = cr.Provider(self,
                               "KafkaConfigProvider",
                               on_event_handler=function,
                               log_retention=logs.RetentionDays.ONE_DAY)

        core.CustomResource(
            self,
            "KafkaLoadTopic",
            service_token=provider.service_token,
            properties={
                "bootstrap": bootstraps.get_response_field('BootstrapBrokerStringTls'),
                "topic": "load",
                "partitions": 150,
                "replicas": 1
            })

        core.CustomResource(
            self,
            "KafkaGenerateTopic",
            service_token=provider.service_token,
            properties={
                "bootstrap": bootstraps.get_response_field('BootstrapBrokerStringTls'),
                "topic": "generate",
                "partitions": 200,
                "replicas": 1
            })
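
A sketch of custom_resources/kafka/config.handler, assuming it uses the kafka-python package vendored into the asset directory by the pip.main call above (note that CloudFormation delivers property values as strings, hence the int() conversions):

from kafka.admin import KafkaAdminClient, NewTopic

def handler(event, context):
    props = event["ResourceProperties"]
    # TLS bootstrap string, so connect over SSL
    admin = KafkaAdminClient(bootstrap_servers=props["bootstrap"],
                             security_protocol="SSL")
    if event["RequestType"] == "Create":
        admin.create_topics([
            NewTopic(name=props["topic"],
                     num_partitions=int(props["partitions"]),
                     replication_factor=int(props["replicas"]))
        ])
    elif event["RequestType"] == "Delete":
        admin.delete_topics([props["topic"]])
    return {"PhysicalResourceId": props["topic"]}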
Example #16
    def __init__(self, scope: Construct, id: str, vpc: _ec2.IVpc,
                 codebucket: IBucket, s3_deploy, metrics) -> None:
        super().__init__(scope, id)

        self._metrics_mapping = CfnMapping(
            self,
            'AnonymousData',
            mapping={'SendAnonymousData': {
                'Data': 'Yes'
            }})
        self._metrics_condition = CfnCondition(
            self,
            'AnonymousDatatoAWS',
            expression=Fn.condition_equals(
                self._metrics_mapping.find_in_map('SendAnonymousData', 'Data'),
                'Yes'))

        self._helper_func = _lambda.SingletonFunction(
            self,
            'SolutionHelper',
            uuid='75248a81-9138-468c-9ba1-bca6c7137599',
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler='lambda_function.handler',
            description='This function generates UUID for each deployment and sends anonymous data to the AWS Solutions team',
            code=_lambda.Code.from_bucket(bucket=codebucket,
                                          key='app_code/solution_helper.zip'),
            vpc=vpc,
            timeout=Duration.seconds(30))
        self._helper_func.add_dependency(s3_deploy)

        self._lambda_provider = _custom_resources.Provider(
            self,
            'LambdaProvider',
            on_event_handler=self._helper_func,
            vpc=vpc)

        self._uuid = CustomResource(
            self,
            'UUIDCustomResource',
            service_token=self._lambda_provider.service_token,
            properties={"Resource": "UUID"},
            resource_type="Custom::CreateUUID",
            removal_policy=RemovalPolicy.DESTROY)

        self._send_data = CustomResource(
            self,
            'SendDataCustomResource',
            service_token=self._lambda_provider.service_token,
            properties={
                "Resource": "AnonymousMetric",
                "UUID": self._uuid.get_att_string("UUID"),
                "Solution": metrics["Solution"],
                "Data": metrics
            },
            resource_type='Custom::AnonymousData',
            removal_policy=RemovalPolicy.DESTROY)
        self._send_data.node.add_dependency(self._uuid)

        Aspects.of(self._helper_func).add(Condition(self._metrics_condition))
        Aspects.of(self._uuid).add(Condition(self._metrics_condition))
        Aspects.of(self._send_data).add(Condition(self._metrics_condition))
Example #17
    def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Validated require props.
        required_props_keys = ['CfOriginDomainName', 'Asg', 'HostedZoneName', 'WebsiteDns']
        for k in required_props_keys:
            if k not in props or not props[k]:
                raise ValueError("Required prop %s is not present" % k)

        # Create a custom resource that returns the IP of the host behind the autoscaling group
        asg = props['Asg']
        asg_ip_handler = lambda_.Function(
            self, 'GhostIpHandler',
            runtime=lambda_.Runtime.PYTHON_3_6,
            code=lambda_.Code.asset('lambda'),
            handler='ghost_ip.handler',
        )

        asg_ip_handler.add_to_role_policy(
            statement=iam.PolicyStatement(
                actions=['autoscaling:DescribeAutoScalingGroups', 'ec2:DescribeInstances'],
                resources=['*'],
            )
        )

        asg_ip_provider = cr.Provider(
            self, 'GhostIpProvider',
            on_event_handler=asg_ip_handler,
        )

        asg_ip_resource = cfn.CustomResource(
            self, 'GhostIpResource',
            provider=asg_ip_provider,
            properties={
                'AsgName': asg.auto_scaling_group_name,
                'ts': time.time(), # this makes sure the function is invoked for every CFN update
            }
        )

        # Create R53 HZ and cf origin domain
        if 'ExistingHostedZoneId' in props and props['ExistingHostedZoneId']:
            hz = route53.HostedZone.from_hosted_zone_attributes(
                self, 'HostedZone', 
                zone_name=props['HostedZoneName'],
                hosted_zone_id=props['ExistingHostedZoneId'],
            )
        else:
            hz = route53.HostedZone(
                self, 'HostedZone',
                zone_name=props['HostedZoneName']
            )

        origin_rrset = route53.ARecord(
            self, 'OriginRecord',
            target=route53.RecordTarget.from_ip_addresses(asg_ip_resource.get_att_string('GhostIp')),
            record_name=props['CfOriginDomainName'],
            zone=hz,
        )

        # Create a CF distro
        acm_cert = acm.DnsValidatedCertificate(
            self, 'GhostAcmCert',
            hosted_zone=hz,
            domain_name=props['WebsiteDns'],
            region='us-east-1',
        )

        cf_distro = cf.CloudFrontWebDistribution(
            self, 'CfDistro',
            origin_configs=[cf.SourceConfiguration(
                custom_origin_source=cf.CustomOriginConfig(
                    domain_name=props['CfOriginDomainName'],
                    origin_protocol_policy=cf.OriginProtocolPolicy.HTTP_ONLY,
                ),
                behaviors=[cf.Behavior(is_default_behavior=True)],
            )],
            alias_configuration=cf.AliasConfiguration(
                names=[props['WebsiteDns']],
                acm_cert_ref=acm_cert.certificate_arn,
            ),
            default_root_object='',
        )

        # Create the top level website DNS pointing to the CF distro
        ghost_rrset = route53.CnameRecord(
            self, 'GhostDns',
            domain_name=cf_distro.domain_name,
            zone=hz,
            record_name=props['WebsiteDns'],
        )
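
A sketch of lambda/ghost_ip.py, matching the autoscaling/ec2 permissions and the get_att_string('GhostIp') read above; whether the private or public address is wanted depends on the origin setup:

import boto3

def handler(event, context):
    if event["RequestType"] == "Delete":
        return {}
    asg_name = event["ResourceProperties"]["AsgName"]
    # Find the (single) instance behind the autoscaling group
    asg = boto3.client("autoscaling").describe_auto_scaling_groups(
        AutoScalingGroupNames=[asg_name])["AutoScalingGroups"][0]
    instance_id = asg["Instances"][0]["InstanceId"]
    reservations = boto3.client("ec2").describe_instances(
        InstanceIds=[instance_id])["Reservations"]
    # Swap for PublicIpAddress if the CloudFront origin must be public
    ip = reservations[0]["Instances"][0]["PrivateIpAddress"]
    return {"Data": {"GhostIp": ip}}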
Example #18
    def __init__(self, scope: core.Construct, config: dict, id: str,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create the security group that allows connections to this instance.
        # I am lazy and create only one SG, which allows TCP 5432 from itself;
        # database clients (the lambda functions) therefore also get TCP 5432
        # authorized among themselves, which is unnecessary but harmless.
        self.db_security_group = ec2.SecurityGroup(self,
                                                   "Database Security Group",
                                                   vpc=config['vpc'])
        self.db_security_group.add_ingress_rule(self.db_security_group,
                                                ec2.Port.tcp(5432))

        self.cluster = rds.DatabaseCluster(
            self,
            config['rds']['name'],
            engine=rds.DatabaseClusterEngine.aurora_postgres(
                version=rds.AuroraPostgresEngineVersion.VER_11_7),
            default_database_name=config['rds']['databaseName'],
            master_user=rds.Login(username=config['rds']['masterUsername']),
            instance_props=rds.InstanceProps(
                vpc=config['vpc'], security_groups=[self.db_security_group]))

        # Add Secrets Manager Password rotation
        self.cluster.add_rotation_single_user()

        # aurora serverless is not yet support by CDK, https://github.com/aws/aws-cdk/issues/929
        # escape hatch https://docs.aws.amazon.com/cdk/latest/guide/cfn_layer.html#cfn_layer_raw
        # cfn_aurora_cluster = cluster.node.default_child
        # cfn_aurora_cluster.add_override("Properties.EngineMode", "serverless")
        # cfn_aurora_cluster.add_override("Properties.EnableHttpEndpoint",True) # Enable Data API
        # cfn_aurora_cluster.add_override("Properties.ScalingConfiguration", {
        #     'AutoPause': True,
        #     'MaxCapacity': 4,
        #     'MinCapacity': 1,
        #     'SecondsUntilAutoPause': 600
        # })
        # cluster.node.try_remove_child('Instance1') # Remove 'Server' instance that isn't required for serverless Aurora

        # create a custom resource to initialize the data schema
        function = _lambda.Function(
            self,
            config['custom resource lambda']['name'],
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.asset('./custom_resources'),
            handler='app.on_event',
            vpc=config['vpc'],
            environment={
                'DB_SECRET_ARN': self.get_secret_arn(),
                'PYTHON_LOGLEVEL': 'DEBUG'
            },
            security_groups=[self.db_security_group])
        # add permission to access the secret
        function.add_to_role_policy(
            iam.PolicyStatement(resources=[self.get_secret_arn()],
                                actions=["secretsmanager:GetSecretValue"]))

        custom_resource_provider = cr.Provider(self,
                                               'Custom Resource Provider',
                                               on_event_handler=function)
        custom_resource = core.CustomResource(
            self,
            'Custom Resource',
            service_token=custom_resource_provider.service_token)

        # Tell CFN to wait for the database to be ready before it creates the custom resource
        custom_resource.node.add_dependency(self.cluster)
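
The schema-initialization handler in ./custom_resources is not shown. A sketch of app.on_event, assuming a pure-Python PostgreSQL driver such as pg8000 is bundled with the asset and the secret follows the standard RDS JSON layout (the DDL is illustrative):

import json
import os
import boto3
import pg8000

def on_event(event, context):
    if event["RequestType"] != "Create":
        return {}
    secret = json.loads(boto3.client("secretsmanager").get_secret_value(
        SecretId=os.environ["DB_SECRET_ARN"])["SecretString"])
    conn = pg8000.connect(host=secret["host"], port=int(secret["port"]),
                          user=secret["username"], password=secret["password"],
                          database=secret["dbname"])
    # illustrative schema; the real DDL lives in the asset
    conn.cursor().execute(
        "CREATE TABLE IF NOT EXISTS example (id serial PRIMARY KEY)")
    conn.commit()
    return {}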
Example #19
    def __init__(
        self,
        scope: cdk.Construct,
        construct_id: str,
        vpc: ec2.Vpc,
        domain: sagemaker.CfnDomain,
        **kwargs,
    ) -> None:
        super().__init__(scope, construct_id, **kwargs)

        studio_domain_id = domain.attr_domain_id  # cdk.Fn.import_value("StudioDomainId")

        # Get the security group associated with the EFS volume managed by SageMaker Studio
        get_parameter = cr.AwsCustomResource(
            self,
            "GetEfsSgId",
            on_update={  # will also be called for a CREATE event
                "service": "EC2",
                "action": "describeSecurityGroups",
                "parameters": {
                    "Filters": [
                        {"Name": "vpc-id", "Values": [vpc.vpc_id]},
                        {
                            "Name": "group-name",
                            "Values": [
                                f"security-group-for-inbound-nfs-{studio_domain_id}"
                            ],
                        },
                    ]
                },
                "physical_resource_id": cr.PhysicalResourceId.of("GetEfsSgId"),
            },
            policy=cr.AwsCustomResourcePolicy.from_sdk_calls(
                resources=cr.AwsCustomResourcePolicy.ANY_RESOURCE
            ),
        )
        sg_id = get_parameter.get_response_field("SecurityGroups.0.GroupId")
        sg_efs = ec2.SecurityGroup.from_security_group_id(
            self, "SG", security_group_id=sg_id)

        # We can now retrieve a handle for the EFS volume
        StudioDomainEfsId = cdk.Fn.import_value("StudioDomainEfsId")
        studio_efs = efs.FileSystem.from_file_system_attributes(
            self,
            "StudioEFS",
            file_system_id=StudioDomainEfsId,
            security_group=sg_efs)

        # Create EFS access point to enable the lambda fn to mount the EFS volume
        efs_ap = efs.AccessPoint(
            self,
            "EfsAccessPoint",
            file_system=studio_efs,
            posix_user=efs.PosixUser(gid="0", uid="0"),
        )

        # Function that takes care of setting up the user environment
        self.lambda_fn = lambda_python.PythonFunction(
            self,
            "UserSetupLambdaFn",
            entry="populate_git_fn",
            index="populate_from_git.py",
            handler="on_event",
            vpc=vpc,
            layers=[
                lambda_.LayerVersion.from_layer_version_arn(
                    self,
                    "GitLayer",
                    layer_version_arn=f"arn:aws:lambda:{self.region}:553035198032:layer:git-lambda2:8",
                ),
            ],
            filesystem=lambda_.FileSystem.from_efs_access_point(
                efs_ap, "/mnt/efs"),
            timeout=cdk.Duration.seconds(300),
            initial_policy=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        "sagemaker:DescribeUserProfile",
                    ],
                    resources=["*"],
                )
            ],
        )

        provider = cr.Provider(
            self,
            "Provider",
            on_event_handler=self.lambda_fn,
        )

        cdk.CfnOutput(
            self,
            "StudioUserProviderToken",
            value=provider.service_token,
            description="StudioUserProviderToken",
            export_name="StudioUserProviderToken",
        )

        self.provider = provider
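
Since the provider's service token is exported, another stack can import it and create user-setup resources against the same provider; a hedged usage sketch (the property name is illustrative):

        user_setup = cdk.CustomResource(
            self,
            "StudioUser",
            service_token=cdk.Fn.import_value("StudioUserProviderToken"),
            properties={"UserProfileName": "data-scientist"},  # hypothetical
        )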
Example #20
    def __init__(self, scope: core.Construct, id: str,
                 log_bucket: _s3.Bucket,
                 config_table: _dynamodb.Table,
                 tshirt_size: str,
                 sink_bucket: _s3.Bucket,
                 vpc: _ec2.Vpc,
                 **kwargs) -> None:

        super().__init__(scope, id, **kwargs)

        service_role = _iam.Role(
            self, 'BatchEmrServiceRole',
            assumed_by=_iam.ServicePrincipal('elasticmapreduce.amazonaws.com')
        )

        service_role.add_managed_policy(_iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AmazonElasticMapReduceRole'))

        cluster_role = _iam.Role(
            self, 'BatchEmrClusterRole',
            assumed_by=_iam.ServicePrincipal("ec2.amazonaws.com")
        )

        _iam.Policy(
            self, 'BatchEmrClusterPolicy',
            statements=[
                _iam.PolicyStatement(
                    actions=[
                        "glue:CreateDatabase",
                        "glue:UpdateDatabase",
                        "glue:DeleteDatabase",
                        "glue:GetDatabase",
                        "glue:GetDatabases",
                        "glue:CreateTable",
                        "glue:UpdateTable",
                        "glue:DeleteTable",
                        "glue:GetTable",
                        "glue:GetTables",
                        "glue:GetTableVersions",
                        "glue:CreatePartition",
                        "glue:BatchCreatePartition",
                        "glue:UpdatePartition",
                        "glue:DeletePartition",
                        "glue:BatchDeletePartition",
                        "glue:GetPartition",
                        "glue:GetPartitions",
                        "glue:BatchGetPartition",
                        "glue:CreateUserDefinedFunction",
                        "glue:UpdateUserDefinedFunction",
                        "glue:DeleteUserDefinedFunction",
                        "glue:GetUserDefinedFunction",
                        "glue:GetUserDefinedFunctions",
                        "cloudwatch:PutMetricData",
                        "dynamodb:ListTables",
                        "s3:HeadBucket",
                        "ec2:Describe*",
                    ],
                    resources=['*']
                ),
                _iam.PolicyStatement(
                    actions=['s3:GetObject'],
                    resources=[
                        'arn:aws:s3:::' + ARA_BUCKET_NAME + BINARIES + DataGenConfig.DSDGEN_INSTALL_SCRIPT,
                        'arn:aws:s3:::' + ARA_BUCKET_NAME + BINARIES + DataGenConfig.JAR_FILE
                    ]
                ),
                _iam.PolicyStatement(
                    actions=['s3:PutObject'],
                    resources=[log_bucket.bucket_arn + "/data-generator/*"]
                ),
                _iam.PolicyStatement(
                    actions=[
                        "s3:AbortMultipartUpload",
                        "s3:CreateBucket",
                        "s3:DeleteObject",
                        "s3:GetBucketVersioning",
                        "s3:GetObject",
                        "s3:GetObjectTagging",
                        "s3:GetObjectVersion",
                        "s3:ListBucket",
                        "s3:ListBucketMultipartUploads",
                        "s3:ListBucketVersions",
                        "s3:ListMultipartUploadParts",
                        "s3:PutBucketVersioning",
                        "s3:PutObject",
                        "s3:PutObjectTagging"
                    ],
                    resources=[
                        sink_bucket.bucket_arn + '/*',
                        sink_bucket.bucket_arn
                    ]
                )
            ],
            roles=[cluster_role]
        )

        cluster_role.add_managed_policy(_iam.ManagedPolicy.from_aws_managed_policy_name('AmazonSSMManagedInstanceCore'))

        _iam.CfnInstanceProfile(
            self, 'BatchEmrClusterInstanceProfile',
            roles=[cluster_role.role_name],
            instance_profile_name=cluster_role.role_name
        )

        # Security Groups for the EMR cluster (private subnet)
        # https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-man-sec-groups.html#emr-sg-elasticmapreduce-master-private
        master_sg = _ec2.SecurityGroup(self, 'ElasticMapReduce-Master-Private', vpc=vpc)
        slave_sg = _ec2.SecurityGroup(self, 'ElasticMapReduce-Slave-Private', vpc=vpc)
        service_sg = _ec2.SecurityGroup(self, 'ElasticMapReduce-ServiceAccess', vpc=vpc, allow_all_outbound=False)

        # Service SG used by the proxy instance
        service_sg.add_ingress_rule(master_sg, _ec2.Port.tcp(9443))
        service_sg.add_egress_rule(master_sg, _ec2.Port.tcp(8443))
        service_sg.add_egress_rule(slave_sg, _ec2.Port.tcp(8443))

        # EMR Master
        master_sg.add_ingress_rule(master_sg, _ec2.Port.all_icmp())
        master_sg.add_ingress_rule(master_sg, _ec2.Port.all_tcp())
        master_sg.add_ingress_rule(master_sg, _ec2.Port.all_udp())
        master_sg.add_ingress_rule(slave_sg, _ec2.Port.all_icmp())
        master_sg.add_ingress_rule(slave_sg, _ec2.Port.all_tcp())
        master_sg.add_ingress_rule(slave_sg, _ec2.Port.all_udp())
        master_sg.add_ingress_rule(service_sg, _ec2.Port.tcp(8443))

        # EMR Slave
        slave_sg.add_ingress_rule(master_sg, _ec2.Port.all_icmp())
        slave_sg.add_ingress_rule(master_sg, _ec2.Port.all_tcp())
        slave_sg.add_ingress_rule(master_sg, _ec2.Port.all_udp())
        slave_sg.add_ingress_rule(slave_sg, _ec2.Port.all_icmp())
        slave_sg.add_ingress_rule(slave_sg, _ec2.Port.all_tcp())
        slave_sg.add_ingress_rule(slave_sg, _ec2.Port.all_udp())
        slave_sg.add_ingress_rule(service_sg, _ec2.Port.tcp(8443))

        with open('common/common_cdk/lambda/datagen_config.py', 'r') as f:
            lambda_source = f.read()

        configure_datagen_function = _lambda.SingletonFunction(
            self, 'BatchConfigureDatagenLambda',
            uuid="58a9a222-ff07-11ea-adc1-0242ac120002",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.inline(lambda_source),
            handler='index.handler',
            function_name='datagen-config',
            environment={
                'TABLE_NAME': config_table.table_name,
                'JAR_LOCATION': BINARIES_LOCATION + DataGenConfig.JAR_FILE,
            },
            timeout=core.Duration.seconds(10)
        )

        configure_datagen_function.role.add_to_policy(
            _iam.PolicyStatement(
                actions=[
                    'dynamodb:GetItem',
                    'dynamodb:PutItem',
                ],
                resources=[config_table.table_arn]
            )
        )
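
        # Hedged sketch of common/common_cdk/lambda/datagen_config.py (read
        # from disk above); the spark-submit flags below are illustrative
        # only, but the returned StepParam is what the EMR step later
        # consumes at $.Config.Payload.StepParam.
        def _datagen_config_handler_sketch(event, context):
            import os
            import boto3
            table = boto3.resource('dynamodb').Table(os.environ['TABLE_NAME'])
            item = table.get_item(Key={'param': event['Param']}).get('Item', {})
            step_param = [
                'spark-submit', '--deploy-mode', 'cluster',
                os.environ['JAR_LOCATION'],
                '--module', event['Module'],
                '--sink-bucket', event['SinkBucket'],
                '--parallelism', event['Parallelism'],
                '--data-size', event['DataSize'],
                '--iteration', str(item.get('iterator', 0)),
            ]
            return {'StepParam': step_param}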

        terminate_cluster = _sfn_tasks.EmrTerminateCluster(
            self, 'BatchDeleteCluster',
            cluster_id=_sfn.TaskInput.from_data_at("$.Emr.Cluster.Id").value,
            integration_pattern=_sfn.IntegrationPattern.RUN_JOB,
        )

        terminate_cluster_error = _sfn_tasks.EmrTerminateCluster(
            self, 'BatchDeleteClusterError',
            cluster_id=_sfn.TaskInput.from_data_at("$.Emr.Cluster.Id").value,
            integration_pattern=_sfn.IntegrationPattern.RUN_JOB,
        ).next(_sfn.Fail(self, 'StepFailure'))

        create_cluster = _sfn_tasks.EmrCreateCluster(
            self, "BatchCreateEMRCluster",
            name="BatchDatagenCluster",
            result_path="$.Emr",
            release_label='emr-5.30.1',
            log_uri=log_bucket.s3_url_for_object() + "/data-generator",
            cluster_role=cluster_role,
            service_role=service_role,
            bootstrap_actions=[
                _sfn_tasks.EmrCreateCluster.BootstrapActionConfigProperty(
                    name="dsdgen-install",
                    script_bootstrap_action=_sfn_tasks.EmrCreateCluster.ScriptBootstrapActionConfigProperty(
                        path=BINARIES_LOCATION + DataGenConfig.DSDGEN_INSTALL_SCRIPT,
                    )
                )
            ],
            applications=[
                _sfn_tasks.EmrCreateCluster.ApplicationConfigProperty(
                    name="spark"
                ),
                _sfn_tasks.EmrCreateCluster.ApplicationConfigProperty(
                    name="hadoop"
                )
            ],
            instances=_sfn_tasks.EmrCreateCluster.InstancesConfigProperty(
                emr_managed_master_security_group=master_sg.security_group_id,
                emr_managed_slave_security_group=slave_sg.security_group_id,
                service_access_security_group=service_sg.security_group_id,
                ec2_subnet_ids=vpc.select_subnets().subnet_ids,
                instance_fleets=[
                    _sfn_tasks.EmrCreateCluster.InstanceFleetConfigProperty(
                        instance_fleet_type=_sfn_tasks.EmrCreateCluster.InstanceRoleType.MASTER,
                        instance_type_configs=[
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m5.xlarge',
                                weighted_capacity=1
                            ),
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m5a.xlarge',
                                weighted_capacity=1
                            ),
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m4.xlarge',
                                weighted_capacity=1
                            ),
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m5d.xlarge',
                                weighted_capacity=1
                            ),
                        ],
                        launch_specifications=_sfn_tasks.EmrCreateCluster.InstanceFleetProvisioningSpecificationsProperty(
                            spot_specification=_sfn_tasks.EmrCreateCluster.SpotProvisioningSpecificationProperty(
                                timeout_action=_sfn_tasks.EmrCreateCluster.SpotTimeoutAction.SWITCH_TO_ON_DEMAND,
                                timeout_duration_minutes=5
                            )
                        ),
                        target_on_demand_capacity=0,
                        target_spot_capacity=1
                    ),
                    _sfn_tasks.EmrCreateCluster.InstanceFleetConfigProperty(
                        instance_fleet_type=_sfn_tasks.EmrCreateCluster.InstanceRoleType.CORE,
                        instance_type_configs=[
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m5.xlarge',
                                weighted_capacity=1
                            ),
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m5.2xlarge',
                                weighted_capacity=2
                            ),
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m5a.xlarge',
                                weighted_capacity=1
                            ),
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m5a.2xlarge',
                                weighted_capacity=2
                            ),
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m4.xlarge',
                                weighted_capacity=1
                            )
                        ],
                        launch_specifications=_sfn_tasks.EmrCreateCluster.InstanceFleetProvisioningSpecificationsProperty(
                            spot_specification=_sfn_tasks.EmrCreateCluster.SpotProvisioningSpecificationProperty(
                                timeout_action=_sfn_tasks.EmrCreateCluster.SpotTimeoutAction.SWITCH_TO_ON_DEMAND,
                                timeout_duration_minutes=5
                            )
                        ),
                        target_on_demand_capacity=0,
                        target_spot_capacity=DataGenConfig.BATCH_CLUSTER_SIZE[tshirt_size]
                    )
                ]
            )
        ).add_catch(handler=terminate_cluster_error, result_path="$.error")

        configure_datagen = _sfn_tasks.LambdaInvoke(
            self, "BatchConfigureDatagenTask",
            lambda_function=configure_datagen_function,
            payload=_sfn.TaskInput.from_text('{'
                                             '"Param": "batch_iterator",'
                                             '"Module": "batch",'
                                             '"SinkBucket": "'+sink_bucket.s3_url_for_object()+'",'
                                             '"Parallelism": "'+str(int(DataGenConfig.BATCH_DATA_SIZE[tshirt_size])*2)+'",'
                                             '"DataSize": "'+DataGenConfig.BATCH_DATA_SIZE[tshirt_size]+'",'
                                             '"TmpBucket": "fake-bucket"'
                                             '}'),
            result_path='$.Config'
        ).add_catch(handler=terminate_cluster_error, result_path="$.error")

        add_datagen_step = _sfn.CustomState(
            self, 'BatchAddDataGenStep',
            state_json={
                "Type": "Task",
                "Resource": "arn:aws:states:::elasticmapreduce:addStep.sync",
                "Parameters": {
                    "ClusterId.$": "$.Emr.Cluster.Id",
                    "Step": {
                        "Name": "DatagenStep",
                        "ActionOnFailure": "CONTINUE",
                        "HadoopJarStep": {
                            "Jar": "command-runner.jar",
                            "Args.$": "$.Config.Payload.StepParam"
                        }
                    }
                },
                "ResultPath": "$.Step",
                "Next": "BatchUpdateIterator",
                "Catch": [
                    {
                        "ErrorEquals": ["States.ALL"],
                        "Next": "BatchDeleteClusterError",
                        "ResultPath": "$.error"
                    }
                ]
            }
        )

        update_iterator = _sfn_tasks.DynamoUpdateItem(
            self, 'BatchUpdateIterator',
            table=config_table,
            key={
                'param': _sfn_tasks.DynamoAttributeValue.from_string('batch_iterator')
            },
            update_expression='SET iterator = if_not_exists(iterator, :start) + :inc',
            expression_attribute_values={
                ":inc": _sfn_tasks.DynamoAttributeValue.from_number(1),
                ":start": _sfn_tasks.DynamoAttributeValue.from_number(0)
            },
            result_path=_sfn.JsonPath.DISCARD
        )

        definition = configure_datagen \
            .next(create_cluster) \
            .next(add_datagen_step) \
            .next(update_iterator) \
            .next(terminate_cluster)

        datagen_stepfunctions = _sfn.StateMachine(
            self, "BatchDataGenStepFunctions",
            definition=definition,
            timeout=core.Duration.minutes(30)
        )

        datagen_stepfunctions.add_to_role_policy(
            _iam.PolicyStatement(
                actions=[
                    'elasticmapreduce:AddJobFlowSteps',
                    'elasticmapreduce:DescribeStep'
                ],
                resources=['*']
            )
        )
        datagen_stepfunctions.add_to_role_policy(
            _iam.PolicyStatement(
                actions= [
                    "iam:CreateServiceLinkedRole",
                    "iam:PutRolePolicy"
                ],
                resources=["arn:aws:iam::*:role/aws-service-role/elasticmapreduce.amazonaws.com*/AWSServiceRoleForEMRCleanup*"],
                conditions= {
                    "StringLike": {
                        "iam:AWSServiceName": [
                            "elasticmapreduce.amazonaws.com",
                            "elasticmapreduce.amazonaws.com.cn"
                        ]
                    }
                }
            )
        )

        step_trigger = _events.Rule(
            self, 'BatchSteptrigger',
            schedule=_events.Schedule.cron(minute='0/30',
                                           hour='*',
                                           month='*',
                                           week_day='*',
                                           year='*')
        )

        step_trigger.add_target(_events_targets.SfnStateMachine(machine=datagen_stepfunctions))

        with open('common/common_cdk/lambda/stepfunctions_trigger.py', 'r') as f:
            lambda_source = f.read()

        stepfunctions_trigger_lambda = _lambda.SingletonFunction(
            self, 'BatchStepFunctionsTriggerLambda',
            uuid="9597f6f2-f840-11ea-adc1-0242ac120002",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.inline(lambda_source),
            handler='index.handler',
            function_name='stepfunctions-batch-datagen-trigger'
        )

        stepfunctions_trigger_lambda.role.add_to_policy(
            _iam.PolicyStatement(
                actions=["states:StartExecution"],
                resources=['*']
            )
        )

        trigger_step_lambda_provider = _custom_resources.Provider(
            self, 'StepFunctionsTriggerLambdaProvider',
            on_event_handler=stepfunctions_trigger_lambda
        )

        core.CustomResource(
            self, 'StepFunctionsTrigger',
            service_token=trigger_step_lambda_provider.service_token,
            properties={
                "stepArn": datagen_stepfunctions.state_machine_arn
            }
        )
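
        # Hedged sketch of common/common_cdk/lambda/stepfunctions_trigger.py
        # (read from disk above): start one execution when the custom
        # resource is first created; 'stepArn' matches the property passed
        # just above.
        def _sfn_trigger_handler_sketch(event, context):
            import boto3
            if event['RequestType'] == 'Create':
                boto3.client('stepfunctions').start_execution(
                    stateMachineArn=event['ResourceProperties']['stepArn'])
            return {'PhysicalResourceId': 'stepfunctions-trigger'}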

        # terminate clusters
        with open('common/common_cdk/lambda/stepfunctions_terminate_emr.py', 'r') as f:
            lambda_source = f.read()

        sfn_terminate = _lambda.SingletonFunction(
            self, 'StepFuncTerminateBatch',
            uuid='58a9a422-ff07-11ea-adc1-0242ac120002',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.inline(lambda_source),
            handler='index.handler',
            timeout=core.Duration.minutes(5)
        )

        sfn_terminate.role.add_to_policy(
            _iam.PolicyStatement(
                actions=[
                    'elasticmapreduce:ListClusters',
                    'elasticmapreduce:TerminateJobFlows',
                    'states:ListStateMachines',
                    'states:ListExecutions',
                    'states:StopExecution'
                ],
                resources=['*']
            )
        )

        sfn_terminate_provider = _custom_resources.Provider(
            self, 'StepFuncTerminateBatchLambdaProvider',
            on_event_handler=sfn_terminate
        )

        core.CustomResource(
            self, 'StepFuncTerminateBatchCustomResource',
            service_token=sfn_terminate_provider.service_token,
            properties={
                "state_machine": 'BatchDatagen'
            })
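
The source of common/common_cdk/lambda/stepfunctions_terminate_emr.py is read from disk above but not shown. A plausible sketch of such a cleanup handler, assuming it reacts to stack deletion by stopping executions of the named state machine and terminating the matching EMR cluster:

    import boto3

    def handler(event, context):
        if event['RequestType'] == 'Delete':
            prefix = event['ResourceProperties']['state_machine']
            sfn = boto3.client('stepfunctions')
            for sm in sfn.list_state_machines()['stateMachines']:
                if sm['name'].startswith(prefix):
                    executions = sfn.list_executions(
                        stateMachineArn=sm['stateMachineArn'],
                        statusFilter='RUNNING')['executions']
                    for execution in executions:
                        sfn.stop_execution(executionArn=execution['executionArn'])
            emr = boto3.client('emr')
            for cluster in emr.list_clusters(
                    ClusterStates=['RUNNING', 'WAITING'])['Clusters']:
                if cluster['Name'] == 'BatchDatagenCluster':
                    emr.terminate_job_flows(JobFlowIds=[cluster['Id']])
        return {'PhysicalResourceId': 'terminate-batch-emr'}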
Example #21
	def __init__(self, scope: core.Construct, id: str, cluster: eks.Cluster, **kwargs) -> None:
		super().__init__(scope, id, **kwargs)

		maps = []
		self.roles = []

		ecr_policy = iam.PolicyStatement(
			actions=[
				"ecr:DescribeImages",
				"ecr:ListImages",
				"ecr:BatchDeleteImage"
			], 
			effect=iam.Effect.ALLOW, 
			resources=[
				"arn:aws:ecr:%s:%s:repository/%s" % (core.Stack.of(self).region, core.Stack.of(self).account, namespace) for namespace in self.node.try_get_context("kubernetes")['namespaces']
			]
		)

		function = lbd.SingletonFunction(
			self,
			"ECRDeleteImagesFunction",
			uuid="19411b0e-0e80-4ad4-a316-3235940775e4",
			code=lbd.Code.from_asset(
				"custom_resources/kubernetes/"
			),
			handler="config.handler",
			runtime=lbd.Runtime.PYTHON_3_7,
			function_name="kubernetesConfig",
			initial_policy=[ecr_policy],
			log_retention=logs.RetentionDays.ONE_DAY,
			timeout=core.Duration.seconds(30)
		)

		provider = cr.Provider(
			self, "ECRDeleteImagesFunctionProvider",
			on_event_handler=function,
			log_retention=logs.RetentionDays.ONE_DAY
		)


		repositories = []
		for namespace in self.node.try_get_context("kubernetes")['namespaces']: 
			manifest = cluster.add_manifest(
				"eksConfigNamespace-%s" % namespace,
				{
					"apiVersion": "v1",
					"kind": "Namespace",
					"metadata": {
						"name": namespace
					}
				}
			)

			sa = cluster.add_service_account(
				"service-account-%s" % namespace,
				name="statement-demo",
				namespace=namespace
			)
			sa.node.add_dependency(manifest)
			self.roles.append(sa.role)

			repository = ecr.Repository(
				self, ("repository-%s" % namespace),
				removal_policy=core.RemovalPolicy.DESTROY,
				repository_name=namespace,
				lifecycle_rules=[ecr.LifecycleRule(max_image_count=1)]
			)

			repositories.append(repository.repository_arn)

			maps.append({
				"apiVersion": "v1",
				"kind": "ConfigMap",
				"metadata": {
					"name": "application.properties",
					"namespace": namespace
				},
				"data": {
					"application-aws.properties":  Path("../%s/src/main/resources/application-aws.properties" % namespace).read_text()
				}
			})

			core.CustomResource(
				self, "ECRDeleteImagesFunction-%s" % namespace, 
				service_token=provider.service_token,
				properties={
					"repository": namespace
				}
			).node.add_dependency(repository)

		eks.KubernetesManifest(
			self, 
			"eksConfigMaps", 
			cluster=cluster, 
			manifest=maps
		)

		iam.Policy(
			self, "saPolicy", 
			force=True, 
			policy_name="EKSSAPolicy", 
			roles=self.roles, 
			statements=[
				iam.PolicyStatement(
					actions=["cloudwatch:PutMetricData"], 
					conditions={
						"StringEquals": {
							"cloudwatch:namespace": "statement12"
						},
					},
					resources=["*"]
				)
			]
		)
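
The config.handler packaged under custom_resources/kubernetes/ is not shown. A minimal sketch of what it plausibly does: on deletion, empty the ECR repository named by the "repository" property so that the repository (RemovalPolicy.DESTROY, max_image_count=1) can actually be removed:

    import boto3

    def handler(event, context):
        repo = event['ResourceProperties']['repository']
        if event['RequestType'] == 'Delete':
            ecr = boto3.client('ecr')
            image_ids = ecr.list_images(repositoryName=repo)['imageIds']
            if image_ids:
                ecr.batch_delete_image(repositoryName=repo, imageIds=image_ids)
        return {'PhysicalResourceId': 'ecr-delete-images-%s' % repo}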
Example #22
	def __init__(self, scope: core.Construct, id: str, elastic: Elastic, vpc: ec2.Vpc, roles: list, cluster: eks.Cluster, **kwargs) -> None:
		super().__init__(scope, id, **kwargs)

		sm_policy = iam.PolicyStatement(
			actions=["secretsmanager:GetSecretValue"], 
			effect=iam.Effect.ALLOW, 
			resources=[elastic.secret.secret_arn]
		)

		es_policy = iam.PolicyStatement(
			actions=["es:DescribeElasticsearchDomain"], 
			effect=iam.Effect.ALLOW, 
			resources=[elastic.domain.domain_arn]
		)

		function = lbd.SingletonFunction(
			self,
			"ElasticsearchConfigFunction",
			uuid="e579d5f9-1709-43ea-b75f-9d1452ca7690",
			code=lbd.Code.from_asset(
				"custom_resources/elasticsearch/"
			),
			handler="config.handler",
			runtime=lbd.Runtime.PYTHON_3_7,
			function_name="elasticsearchConfig",
			initial_policy=[sm_policy,es_policy],
			log_retention=logs.RetentionDays.ONE_DAY,
			security_group=ec2.SecurityGroup.from_security_group_id(self, "lambdaVPC", vpc.vpc_default_security_group),
			timeout=core.Duration.seconds(30),
			vpc=vpc,
			vpc_subnets=ec2.SubnetSelection(
				one_per_az=True
			)
		)

		provider = cr.Provider(
			self, "ElasticsearchConfigProvider",
			on_event_handler=function,
			log_retention=logs.RetentionDays.ONE_DAY
		)

		core.CustomResource(
			self, "ElasticSearchConfig", 
			service_token=provider.service_token,
			properties={
				"domain": elastic.domain.domain_name,
				"secret": elastic.secret.secret_arn,
				"roles": [role.role_arn for role in roles],
				"shards": self.node.try_get_context("elastic")['shards'],
				"user": boto3.client('sts').get_caller_identity().get('Arn'),
				"replicas": self.node.try_get_context("elastic")['replicas']
			}
		)

		manifests = []
		for namespace in self.node.try_get_context("kubernetes")['namespaces']:
			manifests.append({
				"apiVersion": "v1",
				"kind": "ConfigMap",
				"metadata": {
					"name": "elasticsearch",
					"namespace": namespace
				},
				"data": {
					"url": elastic.domain.domain_endpoint
				}
			})
		eks.KubernetesManifest(
			self, 
			"elastic-search-cm", 
			cluster=cluster,
			manifest=manifests
		)		
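
The config.handler shipped in custom_resources/elasticsearch/ is not shown either. A heavily hedged sketch, assuming an Open Distro-style security API, an 'admin' master user, a secret that stores a JSON document with a 'password' key, and that the requests library is vendored with the asset:

    import json

    import boto3
    import requests  # assumed to be vendored with the asset

    def handler(event, context):
        props = event['ResourceProperties']
        sm = boto3.client('secretsmanager')
        password = json.loads(
            sm.get_secret_value(SecretId=props['secret'])['SecretString'])['password']
        # For VPC domains the address lives under DomainStatus['Endpoints']['vpc']
        endpoint = boto3.client('es').describe_elasticsearch_domain(
            DomainName=props['domain'])['DomainStatus']['Endpoint']
        auth = ('admin', password)  # assumed master-user name

        # Default shard/replica counts for indices created later
        requests.put('https://%s/_template/default' % endpoint, auth=auth,
                     json={'index_patterns': ['*'],
                           'settings': {'number_of_shards': props['shards'],
                                        'number_of_replicas': props['replicas']}})

        # Map the service-account roles and the deploying user to all_access
        requests.put('https://%s/_opendistro/_security/api/rolesmapping/all_access'
                     % endpoint, auth=auth,
                     json={'backend_roles': props['roles'] + [props['user']]})
        return {'PhysicalResourceId': 'elasticsearch-config'}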
Example #23
    def __init__(self, scope: cdk.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        ### Parameters
        bootstrap_script_args = cdk.CfnParameter(self, 'BootstrapScriptArgs',
            type='String',
            default='',
            description='Space-separated arguments passed to the bootstrap script.'
        )

        # create a VPC
        vpc = ec2.Vpc(self, 'VPC', cidr='10.0.0.0/16', max_azs=99)

        # select the private subnets created by the VPC construct above
        selection = vpc.select_subnets(
            subnet_type=ec2.SubnetType.PRIVATE
        )

        # Output created subnets
        for i, public_subnet in enumerate(vpc.public_subnets):
            cdk.CfnOutput(self, 'PublicSubnet%i' % i,  value=public_subnet.subnet_id)

        for i, private_subnet in enumerate(vpc.private_subnets):
            cdk.CfnOutput(self, 'PrivateSubnet%i' % i,  value=private_subnet.subnet_id)

        cdk.CfnOutput(self, 'VPCId',  value=vpc.vpc_id)

        # Create a Bucket
        bucket = s3.Bucket(self, "DataRepository")
        quickstart_bucket = s3.Bucket.from_bucket_name(self, 'QuickStartBucket', 'aws-quickstart')

        # Upload Bootstrap Script to that bucket
        bootstrap_script = assets.Asset(self, 'BootstrapScript',
            path='scripts/bootstrap.sh'
        )

        # Upload parallel cluster post_install_script to that bucket
        pcluster_post_install_script = assets.Asset(self, 'PclusterPostInstallScript',
            path='scripts/post_install_script.sh'
        )

        # Setup CloudTrail
        cloudtrail.Trail(self, 'CloudTrail', bucket=bucket)

        # Create a Cloud9 instance
        # Cloud9 doesn't have the ability to provide userdata
        # Because of this we need to use SSM run command
        cloud9_instance = cloud9.Ec2Environment(self, 'Cloud9Env', vpc=vpc, instance_type=ec2.InstanceType(instance_type_identifier='c5.large'))
        cdk.CfnOutput(self, 'URL',  value=cloud9_instance.ide_url)


        # Create a keypair in lambda and store the private key in SecretsManager
        c9_createkeypair_role = iam.Role(self, 'Cloud9CreateKeypairRole', assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'))
        c9_createkeypair_role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AWSLambdaBasicExecutionRole'))
        # Add IAM permissions to the lambda role
        c9_createkeypair_role.add_to_policy(iam.PolicyStatement(
            actions=[
                'ec2:CreateKeyPair',
                'ec2:DeleteKeyPair'
            ],
            resources=['*'],
        ))

        # Lambda for Cloud9 keypair
        c9_createkeypair_lambda = _lambda.Function(self, 'C9CreateKeyPairLambda',
            runtime=_lambda.Runtime.PYTHON_3_6,
            handler='lambda_function.handler',
            timeout=cdk.Duration.seconds(300),
            role=c9_createkeypair_role,
            code=_lambda.Code.asset('functions/source/c9keypair'),
        )

        c9_createkeypair_provider = cr.Provider(self, "C9CreateKeyPairProvider", on_event_handler=c9_createkeypair_lambda)

        c9_createkeypair_cr = cfn.CustomResource(self, "C9CreateKeyPair", provider=c9_createkeypair_provider,
            properties={
                'ServiceToken': c9_createkeypair_lambda.function_arn
            }
        )
        #c9_createkeypair_cr.node.add_dependency(instance_id)
        c9_ssh_private_key_secret = secretsmanager.CfnSecret(self, 'SshPrivateKeySecret',
             secret_string=c9_createkeypair_cr.get_att_string('PrivateKey')
        )
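
        # Hedged sketch of functions/source/c9keypair (shipped as an asset,
        # not shown): create a keypair on Create and expose the private key
        # via the 'PrivateKey' attribute consumed by the secret above; the
        # key name below is hypothetical.
        def _c9_keypair_handler_sketch(event, context):
            import boto3
            ec2_client = boto3.client('ec2')
            if event['RequestType'] == 'Create':
                key = ec2_client.create_key_pair(KeyName='c9-keypair-sketch')
                return {'PhysicalResourceId': key['KeyName'],
                        'Data': {'PrivateKey': key['KeyMaterial']}}
            if event['RequestType'] == 'Delete':
                ec2_client.delete_key_pair(KeyName=event['PhysicalResourceId'])
            return {'PhysicalResourceId': event['PhysicalResourceId']}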

        # The iam policy has a <REGION> parameter that needs to be replaced.
        # We do it programmatically so future versions of the synth'd stack
        # template include all regions.
        with open('iam/ParallelClusterUserPolicy.json') as json_file:
            data = json.load(json_file)
            for s in data['Statement']:
                if s['Sid'] == 'S3ParallelClusterReadOnly':
                    s['Resource'] = []
                    for r in region_info.RegionInfo.regions:
                        s['Resource'].append('arn:aws:s3:::{0}-aws-parallelcluster*'.format(r.name))

            parallelcluster_user_policy = iam.CfnManagedPolicy(self, 'ParallelClusterUserPolicy', policy_document=iam.PolicyDocument.from_json(data))

        # Cloud9 IAM Role
        cloud9_role = iam.Role(self, 'Cloud9Role', assumed_by=iam.ServicePrincipal('ec2.amazonaws.com'))
        cloud9_role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name('AmazonSSMManagedInstanceCore'))
        cloud9_role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name('AWSCloud9User'))
        cloud9_role.add_managed_policy(iam.ManagedPolicy.from_managed_policy_arn(self, 'AttachParallelClusterUserPolicy', parallelcluster_user_policy.ref))
        cloud9_role.add_to_policy(iam.PolicyStatement(
            resources=['*'],
            actions=[
                'ec2:DescribeInstances',
                'ec2:DescribeVolumes',
                'ec2:ModifyVolume'
            ]
        ))
        cloud9_role.add_to_policy(iam.PolicyStatement(
            resources=[c9_ssh_private_key_secret.ref],
            actions=[
                'secretsmanager:GetSecretValue'
            ]
        ))

        bootstrap_script.grant_read(cloud9_role)
        pcluster_post_install_script.grant_read(cloud9_role)

        # Cloud9 User
        # user = iam.User(self, 'Cloud9User', password=cdk.SecretValue.plain_text('supersecretpassword'), password_reset_required=True)

        # Cloud9 Setup IAM Role
        cloud9_setup_role = iam.Role(self, 'Cloud9SetupRole', assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'))
        cloud9_setup_role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AWSLambdaBasicExecutionRole'))
        # Allow pcluster to be run in bootstrap
        cloud9_setup_role.add_managed_policy(iam.ManagedPolicy.from_managed_policy_arn(self, 'AttachParallelClusterUserPolicySetup', parallelcluster_user_policy.ref))

        # Add IAM permissions to the lambda role
        cloud9_setup_role.add_to_policy(iam.PolicyStatement(
            actions=[
                'cloudformation:DescribeStackResources',
                'ec2:AssociateIamInstanceProfile',
                'ec2:AuthorizeSecurityGroupIngress',
                'ec2:DescribeInstances',
                'ec2:DescribeInstanceStatus',
                'ec2:DescribeInstanceAttribute',
                'ec2:DescribeIamInstanceProfileAssociations',
                'ec2:DescribeVolumes',
                'ec2:DescribeVolumeAttribute',
                'ec2:DescribeVolumesModifications',
                'ec2:DescribeVolumeStatus',
                'ssm:DescribeInstanceInformation',
                'ec2:ModifyVolume',
                'ec2:ReplaceIamInstanceProfileAssociation',
                'ec2:ReportInstanceStatus',
                'ssm:SendCommand',
                'ssm:GetCommandInvocation',
                's3:GetObject',
                'lambda:AddPermission',
                'lambda:RemovePermission',
                'events:PutRule',
                'events:DeleteRule',
                'events:PutTargets',
                'events:RemoveTargets',
            ],
            resources=['*'],
        ))

        cloud9_setup_role.add_to_policy(iam.PolicyStatement(
            actions=['iam:PassRole'],
            resources=[cloud9_role.role_arn]
        ))

        cloud9_setup_role.add_to_policy(iam.PolicyStatement(
            actions=[
                'lambda:AddPermission',
                'lambda:RemovePermission'
            ],
            resources=['*']
        ))

        # Cloud9 Instance Profile
        c9_instance_profile = iam.CfnInstanceProfile(self, "Cloud9InstanceProfile", roles=[cloud9_role.role_name])

        # Lambda to add Instance Profile to Cloud9
        c9_instance_profile_lambda = _lambda.Function(self, 'C9InstanceProfileLambda',
            runtime=_lambda.Runtime.PYTHON_3_6,
            handler='lambda_function.handler',
            timeout=cdk.Duration.seconds(900),
            role=cloud9_setup_role,
            code=_lambda.Code.asset('functions/source/c9InstanceProfile'),
        )

        c9_instance_profile_provider = cr.Provider(self, "C9InstanceProfileProvider",
            on_event_handler=c9_instance_profile_lambda,
        )

        instance_id = cfn.CustomResource(self, "C9InstanceProfile", provider=c9_instance_profile_provider,
            properties={
                'InstanceProfile': c9_instance_profile.ref,
                'Cloud9Environment': cloud9_instance.environment_id,
            }
        )
        instance_id.node.add_dependency(cloud9_instance)

        # Lambda for Cloud9 Bootstrap
        c9_bootstrap_lambda = _lambda.Function(self, 'C9BootstrapLambda',
            runtime=_lambda.Runtime.PYTHON_3_6,
            handler='lambda_function.handler',
            timeout=cdk.Duration.seconds(900),
            role=cloud9_setup_role,
            code=_lambda.Code.asset('functions/source/c9bootstrap'),
        )

        c9_bootstrap_provider = cr.Provider(self, "C9BootstrapProvider", on_event_handler=c9_bootstrap_lambda)

        c9_bootstrap_cr = cfn.CustomResource(self, "C9Bootstrap", provider=c9_bootstrap_provider,
            properties={
                'Cloud9Environment': cloud9_instance.environment_id,
                'BootstrapPath': 's3://%s/%s' % (bootstrap_script.s3_bucket_name, bootstrap_script.s3_object_key),
                'BootstrapArguments': bootstrap_script_args.value_as_string,
                'VPCID': vpc.vpc_id,
                'MasterSubnetID': vpc.public_subnets[0].subnet_id,
                'ComputeSubnetID': vpc.private_subnets[0].subnet_id,
                'PostInstallScriptS3Url':  "".join( ['s3://', pcluster_post_install_script.s3_bucket_name,  "/", pcluster_post_install_script.s3_object_key ] ),
                'PostInstallScriptBucket': pcluster_post_install_script.s3_bucket_name,
                'KeyPairId':  c9_createkeypair_cr.ref,
                'KeyPairSecretArn': c9_ssh_private_key_secret.ref
            }
        )
        c9_bootstrap_cr.node.add_dependency(instance_id)
        c9_bootstrap_cr.node.add_dependency(c9_createkeypair_cr)
        c9_bootstrap_cr.node.add_dependency(c9_ssh_private_key_secret)
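
Cloud9 has no user-data hook, so (per the comment above the Cloud9 environment) the bootstrap Lambda drives the instance through SSM Run Command. The functions/source/c9bootstrap asset is not shown; a hedged sketch of the call it presumably makes, with hypothetical helper and argument names:

    import boto3

    def run_bootstrap(instance_id, bootstrap_path, bootstrap_args):
        ssm = boto3.client('ssm')
        command = ssm.send_command(
            InstanceIds=[instance_id],
            DocumentName='AWS-RunShellScript',
            Parameters={'commands': [
                'aws s3 cp %s /tmp/bootstrap.sh' % bootstrap_path,
                'chmod +x /tmp/bootstrap.sh',
                '/tmp/bootstrap.sh %s' % bootstrap_args,
            ]})
        return command['Command']['CommandId']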
Example #24
    def __init__(self, scope: cdk.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Version of ParallelCluster for Cloud9.
        pcluster_version = cdk.CfnParameter(
            self,
            'ParallelClusterVersion',
            description=
            'Specify a custom parallelcluster version. See https://pypi.org/project/aws-parallelcluster/#history for options.',
            default='2.8.0',
            type='String',
            allowed_values=get_version_list('aws-parallelcluster'))

        # S3 URI for Config file
        config = cdk.CfnParameter(
            self,
            'ConfigS3URI',
            description='Set a custom parallelcluster config file.',
            default=
            'https://notearshpc-quickstart.s3.amazonaws.com/{0}/config.ini'.
            format(__version__))

        # Password
        password = cdk.CfnParameter(
            self,
            'UserPasswordParameter',
            description='Set a password for the hpc-quickstart user',
            no_echo=True)

        # create a VPC
        vpc = ec2.Vpc(
            self,
            'VPC',
            cidr='10.0.0.0/16',
            gateway_endpoints={
                "S3":
                ec2.GatewayVpcEndpointOptions(
                    service=ec2.GatewayVpcEndpointAwsService.S3),
                "DynamoDB":
                ec2.GatewayVpcEndpointOptions(
                    service=ec2.GatewayVpcEndpointAwsService.DYNAMODB)
            },
            max_azs=99)

        # select the private subnets created by the VPC construct above
        selection = vpc.select_subnets(subnet_type=ec2.SubnetType.PRIVATE)

        # Output created subnets
        for i, public_subnet in enumerate(vpc.public_subnets):
            cdk.CfnOutput(self,
                          'PublicSubnet%i' % i,
                          value=public_subnet.subnet_id)

        for i, private_subnet in enumerate(vpc.private_subnets):
            cdk.CfnOutput(self,
                          'PrivateSubnet%i' % i,
                          value=private_subnet.subnet_id)

        cdk.CfnOutput(self, 'VPCId', value=vpc.vpc_id)

        # Create a Bucket
        data_bucket = s3.Bucket(self, "DataRepository")
        cdk.CfnOutput(self, 'DataRespository', value=data_bucket.bucket_name)
        cloudtrail_bucket = s3.Bucket(self, "CloudTrailLogs")
        quickstart_bucket = s3.Bucket.from_bucket_name(self,
                                                       'QuickStartBucket',
                                                       'aws-quickstart')

        # Upload Bootstrap Script to that bucket
        bootstrap_script = assets.Asset(self,
                                        'BootstrapScript',
                                        path='scripts/bootstrap.sh')

        # Upload parallel cluster post_install_script to that bucket
        pcluster_post_install_script = assets.Asset(
            self,
            'PclusterPostInstallScript',
            path='scripts/post_install_script.sh')

        # Upload parallel cluster post_install_script to that bucket
        pcluster_config_script = assets.Asset(self,
                                              'PclusterConfigScript',
                                              path='scripts/config.ini')

        # Setup CloudTrail
        cloudtrail.Trail(self, 'CloudTrail', bucket=cloudtrail_bucket)

        # Create a Cloud9 instance
        # Cloud9 doesn't have the ability to provide userdata
        # Because of this we need to use SSM run command
        cloud9_instance = cloud9.Ec2Environment(
            self,
            'ResearchWorkspace',
            vpc=vpc,
            instance_type=ec2.InstanceType(
                instance_type_identifier='c5.large'))
        cdk.CfnOutput(self,
                      'Research Workspace URL',
                      value=cloud9_instance.ide_url)

        # Create a keypair in lambda and store the private key in SecretsManager
        c9_createkeypair_role = iam.Role(
            self,
            'Cloud9CreateKeypairRole',
            assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'))
        c9_createkeypair_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AWSLambdaBasicExecutionRole'))
        # Add IAM permissions to the lambda role
        c9_createkeypair_role.add_to_policy(
            iam.PolicyStatement(
                actions=['ec2:CreateKeyPair', 'ec2:DeleteKeyPair'],
                resources=['*'],
            ))

        # Lambda for Cloud9 keypair
        c9_createkeypair_lambda = _lambda.Function(
            self,
            'C9CreateKeyPairLambda',
            runtime=_lambda.Runtime.PYTHON_3_6,
            handler='lambda_function.handler',
            timeout=cdk.Duration.seconds(300),
            role=c9_createkeypair_role,
            code=_lambda.Code.asset('functions/source/c9keypair'),
        )

        c9_createkeypair_provider = cr.Provider(
            self,
            "C9CreateKeyPairProvider",
            on_event_handler=c9_createkeypair_lambda)

        c9_createkeypair_cr = cfn.CustomResource(
            self,
            "C9CreateKeyPair",
            provider=c9_createkeypair_provider,
            properties={'ServiceToken': c9_createkeypair_lambda.function_arn})
        #c9_createkeypair_cr.node.add_dependency(instance_id)
        c9_ssh_private_key_secret = secretsmanager.CfnSecret(
            self,
            'SshPrivateKeySecret',
            secret_string=c9_createkeypair_cr.get_att_string('PrivateKey'))

        # The iam policy has a <REGION> parameter that needs to be replaced.
        # We do it programmatically so future versions of the synth'd stack
        # template include all regions.
        with open('iam/ParallelClusterUserPolicy.json') as json_file:
            data = json.load(json_file)
            for s in data['Statement']:
                if s['Sid'] == 'S3ParallelClusterReadOnly':
                    s['Resource'] = []
                    for r in region_info.RegionInfo.regions:
                        s['Resource'].append(
                            'arn:aws:s3:::{0}-aws-parallelcluster*'.format(
                                r.name))

            parallelcluster_user_policy = iam.CfnManagedPolicy(
                self,
                'ParallelClusterUserPolicy',
                policy_document=iam.PolicyDocument.from_json(data))

        # Cloud9 IAM Role
        cloud9_role = iam.Role(
            self,
            'Cloud9Role',
            assumed_by=iam.ServicePrincipal('ec2.amazonaws.com'))
        cloud9_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'AmazonSSMManagedInstanceCore'))
        cloud9_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name('AWSCloud9User'))
        cloud9_role.add_managed_policy(
            iam.ManagedPolicy.from_managed_policy_arn(
                self, 'AttachParallelClusterUserPolicy',
                parallelcluster_user_policy.ref))
        cloud9_role.add_to_policy(
            iam.PolicyStatement(resources=['*'],
                                actions=[
                                    'ec2:DescribeInstances',
                                    'ec2:DescribeVolumes', 'ec2:ModifyVolume'
                                ]))
        cloud9_role.add_to_policy(
            iam.PolicyStatement(resources=[c9_ssh_private_key_secret.ref],
                                actions=['secretsmanager:GetSecretValue']))
        cloud9_role.add_to_policy(
            iam.PolicyStatement(
                actions=["s3:Get*", "s3:List*"],
                resources=[
                    "arn:aws:s3:::%s/*" % (data_bucket.bucket_name),
                    "arn:aws:s3:::%s" % (data_bucket.bucket_name)
                ]))

        bootstrap_script.grant_read(cloud9_role)
        pcluster_post_install_script.grant_read(cloud9_role)
        pcluster_config_script.grant_read(cloud9_role)

        # Admin Group
        admin_group = iam.Group(self, 'AdminGroup')
        admin_group.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'AdministratorAccess'))
        admin_group.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'AWSCloud9Administrator'))

        # PowerUser Group
        poweruser_group = iam.Group(self, 'PowerUserGroup')
        poweruser_group.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name('PowerUserAccess'))
        poweruser_group.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'AWSCloud9Administrator'))

        # HPC User
        user = iam.CfnUser(
            self,
            'Researcher',
            groups=[admin_group.node.default_child.ref],
            login_profile=iam.CfnUser.LoginProfileProperty(
                password_reset_required=True,
                password=cdk.SecretValue.cfn_parameter(password).to_string()))

        create_user = cdk.CfnParameter(self,
                                       "CreateUser",
                                       default="false",
                                       type="String",
                                       allowed_values=['true', 'false'
                                                       ]).value_as_string
        user_condition = cdk.CfnCondition(self,
                                          "UserCondition",
                                          expression=cdk.Fn.condition_equals(
                                              create_user, "true"))
        user.cfn_options.condition = user_condition

        cdk.CfnOutput(self,
                      'UserLoginUrl',
                      value="".join([
                          "https://", self.account,
                          ".signin.aws.amazon.com/console"
                      ]),
                      condition=user_condition)
        cdk.CfnOutput(self,
                      'UserName',
                      value=user.ref,
                      condition=user_condition)

        # Cloud9 Setup IAM Role
        cloud9_setup_role = iam.Role(
            self,
            'Cloud9SetupRole',
            assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'))
        cloud9_setup_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AWSLambdaBasicExecutionRole'))
        # Allow pcluster to be run in bootstrap
        cloud9_setup_role.add_managed_policy(
            iam.ManagedPolicy.from_managed_policy_arn(
                self, 'AttachParallelClusterUserPolicySetup',
                parallelcluster_user_policy.ref))

        # Add IAM permissions to the lambda role
        cloud9_setup_role.add_to_policy(
            iam.PolicyStatement(
                actions=[
                    'cloudformation:DescribeStackResources',
                    'ec2:AssociateIamInstanceProfile',
                    'ec2:AuthorizeSecurityGroupIngress',
                    'ec2:DescribeInstances',
                    'ec2:DescribeInstanceStatus',
                    'ec2:DescribeInstanceAttribute',
                    'ec2:DescribeIamInstanceProfileAssociations',
                    'ec2:DescribeVolumes',
                    'ec2:DescribeVolumeAttribute',
                    'ec2:DescribeVolumesModifications',
                    'ec2:DescribeVolumeStatus',
                    'ssm:DescribeInstanceInformation',
                    'ec2:ModifyVolume',
                    'ec2:ReplaceIamInstanceProfileAssociation',
                    'ec2:ReportInstanceStatus',
                    'ssm:SendCommand',
                    'ssm:GetCommandInvocation',
                    's3:GetObject',
                    'lambda:AddPermission',
                    'lambda:RemovePermission',
                    'events:PutRule',
                    'events:DeleteRule',
                    'events:PutTargets',
                    'events:RemoveTargets',
                    'cloud9:CreateEnvironmentMembership',
                ],
                resources=['*'],
            ))

        cloud9_setup_role.add_to_policy(
            iam.PolicyStatement(actions=['iam:PassRole'],
                                resources=[cloud9_role.role_arn]))

        cloud9_setup_role.add_to_policy(
            iam.PolicyStatement(
                actions=['lambda:AddPermission', 'lambda:RemovePermission'],
                resources=['*']))

        # Cloud9 Instance Profile
        c9_instance_profile = iam.CfnInstanceProfile(
            self, "Cloud9InstanceProfile", roles=[cloud9_role.role_name])

        # Lambda to add Instance Profile to Cloud9
        c9_instance_profile_lambda = _lambda.Function(
            self,
            'C9InstanceProfileLambda',
            runtime=_lambda.Runtime.PYTHON_3_6,
            handler='lambda_function.handler',
            timeout=cdk.Duration.seconds(900),
            role=cloud9_setup_role,
            code=_lambda.Code.asset('functions/source/c9InstanceProfile'),
        )

        c9_instance_profile_provider = cr.Provider(
            self,
            "C9InstanceProfileProvider",
            on_event_handler=c9_instance_profile_lambda,
        )

        instance_id = cfn.CustomResource(self,
                                         "C9InstanceProfile",
                                         provider=c9_instance_profile_provider,
                                         properties={
                                             'InstanceProfile':
                                             c9_instance_profile.ref,
                                             'Cloud9Environment':
                                             cloud9_instance.environment_id,
                                         })
        instance_id.node.add_dependency(cloud9_instance)

        # Lambda for Cloud9 Bootstrap
        c9_bootstrap_lambda = _lambda.Function(
            self,
            'C9BootstrapLambda',
            runtime=_lambda.Runtime.PYTHON_3_6,
            handler='lambda_function.handler',
            timeout=cdk.Duration.seconds(900),
            role=cloud9_setup_role,
            code=_lambda.Code.asset('functions/source/c9bootstrap'),
        )

        c9_bootstrap_provider = cr.Provider(
            self, "C9BootstrapProvider", on_event_handler=c9_bootstrap_lambda)

        c9_bootstrap_cr = cfn.CustomResource(
            self,
            "C9Bootstrap",
            provider=c9_bootstrap_provider,
            properties={
                'Cloud9Environment':
                cloud9_instance.environment_id,
                'BootstrapPath':
                's3://%s/%s' % (bootstrap_script.s3_bucket_name,
                                bootstrap_script.s3_object_key),
                'Config':
                config.value_as_string,
                'VPCID':
                vpc.vpc_id,
                'MasterSubnetID':
                vpc.public_subnets[0].subnet_id,
                'ComputeSubnetID':
                vpc.private_subnets[0].subnet_id,
                'PostInstallScriptS3Url':
                "".join([
                    's3://', pcluster_post_install_script.s3_bucket_name, "/",
                    pcluster_post_install_script.s3_object_key
                ]),
                'PostInstallScriptBucket':
                pcluster_post_install_script.s3_bucket_name,
                'S3ReadWriteResource':
                data_bucket.bucket_arn,
                'S3ReadWriteUrl':
                's3://%s' % (data_bucket.bucket_name),
                'KeyPairId':
                c9_createkeypair_cr.ref,
                'KeyPairSecretArn':
                c9_ssh_private_key_secret.ref,
                'UserArn':
                user.attr_arn,
                'PclusterVersion':
                pcluster_version.value_as_string
            })
        c9_bootstrap_cr.node.add_dependency(instance_id)
        c9_bootstrap_cr.node.add_dependency(c9_createkeypair_cr)
        c9_bootstrap_cr.node.add_dependency(c9_ssh_private_key_secret)
        c9_bootstrap_cr.node.add_dependency(data_bucket)

        enable_budget = cdk.CfnParameter(self,
                                         "EnableBudget",
                                         default="true",
                                         type="String",
                                         allowed_values=['true', 'false'
                                                         ]).value_as_string
        # Budgets
        budget_properties = {
            'budgetType': "COST",
            'timeUnit': "ANNUALLY",
            'budgetLimit': {
                'amount':
                cdk.CfnParameter(
                    self,
                    'BudgetLimit',
                    description=
                    'The initial budget for this project in USD ($).',
                    default=2000,
                    type='Number').value_as_number,
                'unit':
                "USD",
            },
            'costFilters': None,
            'costTypes': {
                'includeCredit': False,
                'includeDiscount': True,
                'includeOtherSubscription': True,
                'includeRecurring': True,
                'includeRefund': True,
                'includeSubscription': True,
                'includeSupport': True,
                'includeTax': True,
                'includeUpfront': True,
                'useAmortized': False,
                'useBlended': False,
            },
            'plannedBudgetLimits': None,
            'timePeriod': None,
        }

        email = {
            'notification': {
                'comparisonOperator': "GREATER_THAN",
                'notificationType': "ACTUAL",
                'threshold': 80,
                'thresholdType': "PERCENTAGE",
            },
            'subscribers': [{
                'address':
                cdk.CfnParameter(
                    self,
                    'NotificationEmail',
                    description=
                    'This email address will receive billing alarm notifications when 80% of the budget limit is reached.',
                    default='*****@*****.**').value_as_string,
                'subscriptionType':
                "EMAIL",
            }]
        }

        overall_budget = budgets.CfnBudget(
            self,
            "HPCBudget",
            budget=budget_properties,
            notifications_with_subscribers=[email],
        )
        overall_budget.cfn_options.condition = cdk.CfnCondition(
            self,
            "BudgetCondition",
            expression=cdk.Fn.condition_equals(enable_budget, "true"))
Example #25
    def __init__(self, scope: core.Construct, id: str, log_bucket: _s3.Bucket,
                 config_table: _dynamodb.Table, tshirt_size: str,
                 sink_bucket: _s3.Bucket, web_sale_stream: str,
                 web_customer_stream: str, web_customer_address_stream: str,
                 kinesis_key: _kms.Key, vpc: _ec2.Vpc, **kwargs) -> None:

        super().__init__(scope, id, **kwargs)

        stack = core.Stack.of(self)

        stream_source_bucket = AutoEmptyBucket(
            self,
            'StreamSource',
            bucket_name='ara-stream-source-' + core.Aws.ACCOUNT_ID,
            uuid='95505f50-0276-11eb-adc1-0242ac120002')

        service_role = _iam.Role(
            self,
            'StreamEmrServiceRole',
            assumed_by=_iam.ServicePrincipal('elasticmapreduce.amazonaws.com'))

        service_role.add_managed_policy(
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AmazonElasticMapReduceRole'))

        cluster_role = _iam.Role(
            self,
            'StreamEmrClusterRole',
            assumed_by=_iam.ServicePrincipal("ec2.amazonaws.com"))

        _iam.Policy(
            self,
            'StreamEmrClusterPolicy',
            statements=[
                _iam.PolicyStatement(actions=[
                    "glue:CreateDatabase",
                    "glue:UpdateDatabase",
                    "glue:DeleteDatabase",
                    "glue:GetDatabase",
                    "glue:GetDatabases",
                    "glue:CreateTable",
                    "glue:UpdateTable",
                    "glue:DeleteTable",
                    "glue:GetTable",
                    "glue:GetTables",
                    "glue:GetTableVersions",
                    "glue:CreatePartition",
                    "glue:BatchCreatePartition",
                    "glue:UpdatePartition",
                    "glue:DeletePartition",
                    "glue:BatchDeletePartition",
                    "glue:GetPartition",
                    "glue:GetPartitions",
                    "glue:BatchGetPartition",
                    "glue:CreateUserDefinedFunction",
                    "glue:UpdateUserDefinedFunction",
                    "glue:DeleteUserDefinedFunction",
                    "glue:GetUserDefinedFunction",
                    "glue:GetUserDefinedFunctions",
                    "cloudwatch:PutMetricData",
                    "dynamodb:ListTables",
                    "s3:HeadBucket",
                    "ec2:Describe*",
                ],
                                     resources=['*']),
                _iam.PolicyStatement(
                    actions=['s3:GetObject'],
                    resources=[
                        'arn:aws:s3:::' + ARA_BUCKET_NAME + BINARIES +
                        DataGenConfig.DSDGEN_INSTALL_SCRIPT, 'arn:aws:s3:::' +
                        ARA_BUCKET_NAME + BINARIES + DataGenConfig.JAR_FILE
                    ]),
                _iam.PolicyStatement(
                    actions=['s3:PutObject'],
                    resources=[log_bucket.bucket_arn + "/data-generator/*"]),
                _iam.PolicyStatement(
                    actions=[
                        "s3:AbortMultipartUpload", "s3:CreateBucket",
                        "s3:DeleteObject", "s3:GetBucketVersioning",
                        "s3:GetObject", "s3:GetObjectTagging",
                        "s3:GetObjectVersion", "s3:ListBucket",
                        "s3:ListBucketMultipartUploads",
                        "s3:ListBucketVersions", "s3:ListMultipartUploadParts",
                        "s3:PutBucketVersioning", "s3:PutObject",
                        "s3:PutObjectTagging"
                    ],
                    resources=[
                        sink_bucket.bucket_arn + '/*', sink_bucket.bucket_arn,
                        stream_source_bucket.bucket.bucket_arn + '/*',
                        stream_source_bucket.bucket.bucket_arn
                    ])
            ],
            roles=[cluster_role])

        cluster_role.add_managed_policy(
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                'AmazonSSMManagedInstanceCore'))

        _iam.CfnInstanceProfile(self,
                                'StreamEmrClusterInstanceProfile',
                                roles=[cluster_role.role_name],
                                instance_profile_name=cluster_role.role_name)

        # Security Groups for the EMR cluster (private subnet)
        # https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-man-sec-groups.html#emr-sg-elasticmapreduce-master-private
        master_sg = _ec2.SecurityGroup(self,
                                       'ElasticMapReduce-Master-Private',
                                       vpc=vpc)
        slave_sg = _ec2.SecurityGroup(self,
                                      'ElasticMapReduce-Slave-Private',
                                      vpc=vpc)
        service_sg = _ec2.SecurityGroup(self,
                                        'ElasticMapReduce-ServiceAccess',
                                        vpc=vpc,
                                        allow_all_outbound=False)

        # Service SG used by the proxy instance
        service_sg.add_ingress_rule(master_sg, _ec2.Port.tcp(9443))
        service_sg.add_egress_rule(master_sg, _ec2.Port.tcp(8443))
        service_sg.add_egress_rule(slave_sg, _ec2.Port.tcp(8443))

        # EMR Master
        master_sg.add_ingress_rule(master_sg, _ec2.Port.all_icmp())
        master_sg.add_ingress_rule(master_sg, _ec2.Port.all_tcp())
        master_sg.add_ingress_rule(master_sg, _ec2.Port.all_udp())
        master_sg.add_ingress_rule(slave_sg, _ec2.Port.all_icmp())
        master_sg.add_ingress_rule(slave_sg, _ec2.Port.all_tcp())
        master_sg.add_ingress_rule(slave_sg, _ec2.Port.all_udp())
        master_sg.add_ingress_rule(service_sg, _ec2.Port.tcp(8443))

        # EMR Slave
        slave_sg.add_ingress_rule(master_sg, _ec2.Port.all_icmp())
        slave_sg.add_ingress_rule(master_sg, _ec2.Port.all_tcp())
        slave_sg.add_ingress_rule(master_sg, _ec2.Port.all_udp())
        slave_sg.add_ingress_rule(slave_sg, _ec2.Port.all_icmp())
        slave_sg.add_ingress_rule(slave_sg, _ec2.Port.all_tcp())
        slave_sg.add_ingress_rule(slave_sg, _ec2.Port.all_udp())
        slave_sg.add_ingress_rule(service_sg, _ec2.Port.tcp(8443))

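        # Singleton Lambda (deduplicated by its uuid) that persists the data
        # generator configuration in DynamoDB for each run; its response is
        # consumed below via $.Config.Payload.StepParam to build the EMR step.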
        with open('common/common_cdk/lambda/datagen_config.py', 'r') as f:
            lambda_source = f.read()

        configure_datagen_function = _lambda.SingletonFunction(
            self,
            'StreamConfigureDatagenLambda',
            uuid="a9904dec-01cf-11eb-adc1-0242ac120002",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.from_inline(lambda_source),
            handler='index.handler',
            function_name='stream-datagen-config',
            environment={
                'TABLE_NAME': config_table.table_name,
                'JAR_LOCATION': BINARIES_LOCATION + DataGenConfig.JAR_FILE,
            },
            timeout=core.Duration.seconds(10))

        configure_datagen_function.role.add_to_policy(
            _iam.PolicyStatement(actions=[
                'dynamodb:GetItem',
                'dynamodb:PutItem',
            ],
                                 resources=[config_table.table_arn]))

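        # Long-running EMR cluster that executes the dsdgen (TPC-DS) data
        # generation steps; the core fleet is sized by the deployment's
        # t-shirt size.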
        emr_cluster = _emr.CfnCluster(
            self,
            'StreamEmrCluster',
            name="StreamDatagenCluster",
            job_flow_role=cluster_role.role_name,
            service_role=service_role.role_name,
            release_label='emr-5.30.1',
            visible_to_all_users=True,
            log_uri=log_bucket.s3_url_for_object() + "/data-generator",
            applications=[
                _emr.CfnCluster.ApplicationProperty(name='hadoop'),
                _emr.CfnCluster.ApplicationProperty(name='spark')
            ],
            bootstrap_actions=[
                _emr.CfnCluster.BootstrapActionConfigProperty(
                    name="dsdgen-install",
                    script_bootstrap_action=_emr.CfnCluster.ScriptBootstrapActionConfigProperty(
                        path=BINARIES_LOCATION + DataGenConfig.DSDGEN_INSTALL_SCRIPT))
            ],
            instances=_emr.CfnCluster.JobFlowInstancesConfigProperty(
                emr_managed_master_security_group=master_sg.security_group_id,
                emr_managed_slave_security_group=slave_sg.security_group_id,
                service_access_security_group=service_sg.security_group_id,
                ec2_subnet_id=vpc.private_subnets[0].subnet_id,
                core_instance_group=_emr.CfnCluster.InstanceGroupConfigProperty(
                    instance_count=DataGenConfig.BATCH_CLUSTER_SIZE[tshirt_size],
                    instance_type='m5.xlarge'),
                master_instance_group=_emr.CfnCluster.InstanceGroupConfigProperty(
                    instance_count=1, instance_type='m4.large')))

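        # Step Functions workflow: configure the generator, submit the EMR
        # step and wait for it, then advance the stream iterator in DynamoDB.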
        configure_datagen = _sfn_tasks.LambdaInvoke(
            self,
            "ConfigureDatagenTask",
            lambda_function=configure_datagen_function,
            payload=_sfn.TaskInput.from_text(
                '{'
                '"Param": "stream_iterator",'
                '"Module": "stream",'
                '"SinkBucket": "' + sink_bucket.s3_url_for_object() + '",'
                '"Parallelism": "' + str(int(DataGenConfig.STREAM_DATA_SIZE[tshirt_size]) * 2) + '",'
                '"DataSize": "' + DataGenConfig.STREAM_DATA_SIZE[tshirt_size] + '",'
                '"TmpBucket": "' + stream_source_bucket.bucket.s3_url_for_object() + '"'
                '}'),
            result_path='$.Config')

        add_datagen_step = _sfn.CustomState(
            self,
            'StreamAddDataGenStep',
            state_json={
                "Type": "Task",
                "Resource": "arn:aws:states:::elasticmapreduce:addStep.sync",
                "Parameters": {
                    "ClusterId.$": "$.Emr.Cluster.Id",
                    "Step": {
                        "Name": "DatagenStep",
                        "ActionOnFailure": "CONTINUE",
                        "HadoopJarStep": {
                            "Jar": "command-runner.jar",
                            "Args.$": "$.Config.Payload.StepParam"
                        }
                    }
                },
                "ResultPath": "$.Step",
                "Next": "StreamUpdateIterator"
            })

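        # Raw ASL via CustomState gives direct control over the
        # addStep.sync parameters; the "Next" field below must match the id
        # of the iterator-update state defined after it.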
        update_iterator = _sfn_tasks.DynamoUpdateItem(
            self,
            'StreamUpdateIterator',
            table=config_table,
            key={
                'param':
                _sfn_tasks.DynamoAttributeValue.from_string('stream_iterator')
            },
            update_expression='SET iterator = if_not_exists(iterator, :start) + :inc',
            expression_attribute_values={
                ":inc": _sfn_tasks.DynamoAttributeValue.from_number(1),
                ":start": _sfn_tasks.DynamoAttributeValue.from_number(0)
            },
            result_path=_sfn.JsonPath.DISCARD)

        definition = configure_datagen \
            .next(add_datagen_step) \
            .next(update_iterator)

        datagen_stepfunctions = _sfn.StateMachine(
            self,
            "StreamDataGenStepFunctions",
            definition=definition,
            timeout=core.Duration.minutes(30))

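        # addStep.sync polls the submitted step until completion, so the
        # state machine role needs DescribeStep in addition to
        # AddJobFlowSteps.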
        datagen_stepfunctions.add_to_role_policy(
            _iam.PolicyStatement(actions=[
                'elasticmapreduce:AddJobFlowSteps',
                'elasticmapreduce:DescribeStep'
            ],
                                 resources=['*']))

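        # Fire the workflow every 10 minutes, passing the EMR cluster id in
        # the shape the states above expect ($.Emr.Cluster.Id).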
        step_trigger = _events.Rule(self,
                                    'StreamStepTrigger',
                                    schedule=_events.Schedule.cron(
                                        minute='0/10',
                                        hour='*',
                                        month='*',
                                        week_day='*',
                                        year='*'))

        step_trigger.add_target(
            _events_targets.SfnStateMachine(
                machine=datagen_stepfunctions,
                input=_events.RuleTargetInput.from_object({
                    "Emr": {
                        "Cluster": {
                            "Id": core.Fn.ref(emr_cluster.logical_id)
                        }
                    }
                })))

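        # One-shot custom resource that starts the state machine right after
        # deployment (the scheduled rule above only fires on its next tick).
        # A minimal sketch of what stepfunctions_trigger.py plausibly looks
        # like -- hypothetical, the real handler may differ:
        #
        #     import boto3
        #
        #     def handler(event, context):
        #         step_arn = event['ResourceProperties']['stepArn']
        #         if event['RequestType'] == 'Create':
        #             boto3.client('stepfunctions').start_execution(
        #                 stateMachineArn=step_arn)
        #         return {'PhysicalResourceId': step_arn}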
        with open('common/common_cdk/lambda/stepfunctions_trigger.py',
                  'r') as f:
            lambda_source = f.read()

        stepfunctions_trigger_lambda = _lambda.SingletonFunction(
            self,
            'StreamStepFunctionsTriggerLambda',
            uuid="cf042246-01d0-11eb-adc1-0242ac120002",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.from_inline(lambda_source),
            handler='index.handler',
            function_name='stepfunctions-stream-datagen-trigger')

        stepfunctions_trigger_lambda.role.add_to_policy(
            _iam.PolicyStatement(actions=["states:StartExecution"],
                                 resources=['*']))

        trigger_step_lambda_provider = _custom_resources.Provider(
            self,
            'StreamStepFunctionsTriggerLambdaProvider',
            on_event_handler=stepfunctions_trigger_lambda)

        core.CustomResource(
            self,
            'StreamStepFunctionsTrigger',
            service_token=trigger_step_lambda_provider.service_token,
            properties={"stepArn": datagen_stepfunctions.state_machine_arn})

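        # The three generator Lambdas below share the same inline source; S3
        # object-created notifications, filtered by prefix/suffix, fan each
        # CSV drop out to the matching Kinesis stream.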
        with open('common/common_cdk/lambda/stream_generator.py', 'r') as f:
            lambda_source = f.read()

        sale_stream_generator_lambda = _lambda.Function(
            scope=self,
            id='WebSaleStreamGenerator',
            runtime=_lambda.Runtime.PYTHON_3_7,
            memory_size=2048,
            timeout=core.Duration.minutes(15),
            code=_lambda.Code.from_inline(lambda_source),
            handler='index.lambda_handler',
            environment={
                'REGION': core.Aws.REGION,
                'STREAM_NAME': web_sale_stream
            })

        stream_source_bucket.bucket.add_event_notification(
            _s3.EventType.OBJECT_CREATED,
            _s3_notifications.LambdaDestination(sale_stream_generator_lambda),
            _s3.NotificationKeyFilter(prefix='sale', suffix='csv'))

        sale_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(
                actions=[
                    "s3:DeleteObject",
                    "s3:GetObject",
                    "s3:ListBucket",
                ],
                resources=[
                    stream_source_bucket.bucket.bucket_arn + '/*',
                    stream_source_bucket.bucket.bucket_arn
                ]))

        sale_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(actions=["kinesis:PutRecords"],
                                 resources=[
                                     stack.format_arn(
                                         service='kinesis',
                                         resource='stream',
                                         resource_name=web_sale_stream)
                                 ]))

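        # The streams are presumably SSE-KMS encrypted with kinesis_key, so
        # each producer also needs kms:GenerateDataKey on that key.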
        sale_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(actions=['kms:GenerateDataKey'],
                                 resources=[
                                     stack.format_arn(
                                         service='kms',
                                         resource='key',
                                         sep='/',
                                         resource_name=kinesis_key.key_id)
                                 ]))

        customer_stream_generator_lambda = _lambda.Function(
            scope=self,
            id='WebCustomerStreamGenerator',
            runtime=_lambda.Runtime.PYTHON_3_7,
            memory_size=2048,
            timeout=core.Duration.minutes(15),
            code=_lambda.Code.from_inline(lambda_source),
            handler='index.lambda_handler',
            environment={
                'REGION': core.Aws.REGION,
                'STREAM_NAME': web_customer_stream
            })

        stream_source_bucket.bucket.add_event_notification(
            _s3.EventType.OBJECT_CREATED,
            _s3_notifications.LambdaDestination(
                customer_stream_generator_lambda),
            _s3.NotificationKeyFilter(prefix='customer', suffix='csv'))

        customer_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(
                actions=[
                    "s3:DeleteObject",
                    "s3:GetObject",
                    "s3:ListBucket",
                ],
                resources=[
                    stream_source_bucket.bucket.bucket_arn + '/*',
                    stream_source_bucket.bucket.bucket_arn
                ]))

        customer_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(actions=["kinesis:PutRecords"],
                                 resources=[
                                     stack.format_arn(
                                         service='kinesis',
                                         resource='stream',
                                         resource_name=web_customer_stream)
                                 ]))

        customer_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(actions=['kms:GenerateDataKey'],
                                 resources=[
                                     stack.format_arn(
                                         service='kms',
                                         resource='key',
                                         sep='/',
                                         resource_name=kinesis_key.key_id)
                                 ]))

        address_stream_generator_lambda = _lambda.Function(
            scope=self,
            id='WebCustomerAddressStreamGenerator',
            runtime=_lambda.Runtime.PYTHON_3_7,
            memory_size=2048,
            timeout=core.Duration.minutes(15),
            code=_lambda.Code.from_inline(lambda_source),
            handler='index.lambda_handler',
            environment={
                'REGION': core.Aws.REGION,
                'STREAM_NAME': web_customer_address_stream
            })

        stream_source_bucket.bucket.add_event_notification(
            _s3.EventType.OBJECT_CREATED,
            _s3_notifications.LambdaDestination(
                address_stream_generator_lambda),
            _s3.NotificationKeyFilter(prefix='address', suffix='csv'))

        address_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(
                actions=[
                    "s3:DeleteObject",
                    "s3:GetObject",
                    "s3:ListBucket",
                ],
                resources=[
                    stream_source_bucket.bucket.bucket_arn + '/*',
                    stream_source_bucket.bucket.bucket_arn
                ]))

        address_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(
                actions=["kinesis:PutRecords"],
                resources=[
                    stack.format_arn(service='kinesis',
                                     resource='stream',
                                     resource_name=web_customer_address_stream)
                ]))

        address_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(actions=['kms:GenerateDataKey'],
                                 resources=[
                                     stack.format_arn(
                                         service='kms',
                                         resource='key',
                                         sep='/',
                                         resource_name=kinesis_key.key_id)
                                 ]))