Code Example #1
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

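        # Source and destination buckets for the object-copy workflow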
        bucket = s3.Bucket(self, "BucketOne", bucket_name="jc-100-test-bucket")

        another_bucket = s3.Bucket(self, "BucketTwo", bucket_name="jc-200-test-bucket")

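        # Lambda function triggered by PUT and COPY events on the source bucket;
        # its inline policy grants object read/write on both buckets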
        lambda_function = lam.Function(
            self,
            "test-copy-bucket-cdk",
            runtime=lam.Runtime.PYTHON_3_7,
            events=[
                lam_events.S3EventSource(
                    bucket,
                    events=[
                        s3.EventType.OBJECT_CREATED_PUT,
                        s3.EventType.OBJECT_CREATED_COPY,
                    ],
                )
            ],
            handler="index.handle",
            code=lam.Code.from_asset("lambda"),
            environment={"DESTINATION_BUCKET": another_bucket.bucket_name},
            initial_policy=[
                iam.PolicyStatement(
                    actions=["s3:PutObject", "s3:GetObject"],
                    resources=[
                        bucket.bucket_arn,
                        bucket.arn_for_objects("*"),
                        another_bucket.bucket_arn,
                        another_bucket.arn_for_objects("*"),
                    ],
                )
            ],
        )
Code Example #2
    def __init__(self, scope: core.Construct, id: str, *, prefix: str,
                 environment: str, configuration, **kwargs):
        """
        :param scope: Stack class, used by CDK.
        :param id: ID of the construct, used by CDK.
        :param prefix: Prefix of the construct, used for naming purposes.
        :param environment: Environment of the construct, used for naming purposes.
        :param configuration: Configuration of the construct. In this case S3_LAMBDA_CONFIG_SCHEMA.
        :param kwargs: Other parameters that could be used by the construct.
        """
        super().__init__(scope, id, **kwargs)
        self.prefix = prefix
        self.environment_ = environment
        self._configuration = configuration

        # Validating that the payload passed is correct
        validate_configuration(configuration_schema=S3_LAMBDA_CONFIG_SCHEMA,
                               configuration_received=self._configuration)

        # Defining S3 Bucket
        bucket_data = deepcopy(self._configuration["bucket"])
        self._s3_bucket = base_bucket(self, **bucket_data)

        # Defining the Lambda function
        functions_data = self._configuration["lambda_handler"]
        self._lambda_function = base_lambda_function(self, **functions_data)

        # Defining the Lambda subscription to the S3 bucket specified in the cdk.json file.
        s3_events = self._configuration["events"]
        event_list = enum_parsing(source_list=s3_events,
                                  target_enum=s3.EventType)

        s3_subscription = events.S3EventSource(bucket=self._s3_bucket,
                                               events=event_list)
        self._lambda_function.add_event_source(source=s3_subscription)
Code Example #3
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # S3 bucket used for Lambda function code
        lambda_code_bucket = s3.Bucket(self, "IntelixLambda")
        # S3 bucket used by application - this will contain objects to be scanned
        input_bucket = s3.Bucket(self, "IntelixLambdaInput")
        # S3 bucket used for application output - this will contain objects that have been scanned and passed as non-malware
        output_bucket = s3.Bucket(self, "IntelixLambdaOutput")

        # Core Lambda function
        handler = lambda_.Function(
            self,
            "IntelixLambdaHandler",
            runtime=lambda_.Runtime.PYTHON_3_7,
            code=lambda_.Code.from_asset("resources"),
            handler="intelix_file_check.file_check_handler",
            timeout=core.Duration.seconds(900),
            environment=dict(BUCKET=lambda_code_bucket.bucket_name,
                             INTELIX_CREDENTIALS=
                             "<YOUR BASE64 CODED CLIENT_ID:CLIENT_SECRET>",
                             OUTPUT_BUCKET=output_bucket.bucket_name))
        lambda_code_bucket.grant_read_write(handler)
        input_bucket.grant_read_write(handler)
        output_bucket.grant_read_write(handler)

        # Event trigger - invoke the Lambda when an object is created in input_bucket
        handler.add_event_source(
            aws_lambda_event_sources.S3EventSource(
                input_bucket, events=[s3.EventType.OBJECT_CREATED]))
Code Example #4
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create S3 bucket here.
        bucket = aws_s3.Bucket(self, image_bucket_name)

        # CFN output for S3 bucket creation.
        core.CfnOutput(
            self,
            "image-bucket",
            value=bucket.bucket_name,
            description="Bucket for uploading images",
        )

        # Create dynamodb table for storing image labels here.
        table = aws_dynamodb.Table(
            self,
            "image-lables",
            partition_key=aws_dynamodb.Attribute(
                name="image", type=aws_dynamodb.AttributeType.STRING
            ),
        )

        # CFN output for dynamodb table creation.
        core.CfnOutput(
            self,
            "image-lables-ddb-table",
            value=table.table_name,
            description="DDB table for storing image lables.",
        )

        function = aws_lambda.Function(
            self,
            "rekognitionFunction",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            handler="handler.main",
            code=aws_lambda.Code.asset("./rekognitionLambda"),
            timeout=core.Duration.seconds(30),
            memory_size=1024,
        )
        function.add_environment("TABLE", table.table_name)
        function.add_environment("BUCKET", bucket.bucket_name)
        function.add_event_source(
            aws_lambda_event_sources.S3EventSource(
                bucket=bucket, events=[aws_s3.EventType.OBJECT_CREATED]
            )
        )
        bucket.grant_read(function)
        table.grant_write_data(function)

        function.add_to_role_policy(
            statement=aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                actions=["rekognition:DetectLabels"],
                resources=["*"],
            )
        )
Code Example #5
    def create_lambda_trigger_task_custom(self, vpc, **kwargs):
        ####################
        # Unpack values
        app_name = kwargs['function'].replace("_", "-")
        lambda_name = "{}-lambda".format(app_name)
        code_name = "{}".format(app_name)
        trigger_bucket_name = "{}-bucket-event".format(app_name)
        train_bucket_name = "{}-bucket-model".format(app_name)
        job_name = "{}-job".format(app_name)
        task = "{}-task".format(app_name)
        instance = kwargs['instance']
        image_uri = kwargs['image_uri']

        # Config role
        lambda_base_role = iam.Role(
            self,
            "gw_lambda_train_graph_role",
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"))
        #lambda_base_role.add_managed_policy(iam.ManagedPolicy.from_managed_policy_name("AWSLambdaBasicExectutionRole", "arn:aws:iam:aws:policy/service-role/AWSLambdaBasicExecutionRole"))
        #lambda_base_role.add_managed_policy(iam.ManagedPolicy.from_managed_policy_name("AWSLambdaVPCAcessExecutionRole", "arn:aws:iam:aws:policy/service-role/AWSLambdaVPCAccessExecutionRole"))
        #lambda_base_role.add_managed_policy(iam.ManagedPolicy.from_managed_policy_name("AmazonSageMakerFullAccess", "arn:aws:iam:aws:policy/AmazonSageMakerFullAccess"))
        lambda_base_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AWSLambdaBasicExecutionRole"))
        lambda_base_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AWSLambdaVPCAccessExecutionRole"))
        lambda_base_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonSageMakerFullAccess"))

        # Create Lambda
        lambda_app = _lambda.Function(self,
                                      lambda_name,
                                      handler='{}.handler'.format(code_name),
                                      runtime=_lambda.Runtime.PYTHON_3_7,
                                      code=_lambda.Code.asset('lambda'),
                                      role=lambda_base_role,
                                      environment={
                                          'BUCKET': train_bucket_name,
                                          'JOB_NAME': job_name,
                                          'INSTANCE': instance,
                                          'IMAGE_URI': image_uri,
                                          'TASK': task,
                                          "REDIS_URL": self.redis_host,
                                          "REDIS_PORT": self.redis_port
                                      })
        # Create an S3 event source for the Lambda
        bucket = s3.Bucket(self, trigger_bucket_name)
        s3_event_source = lambda_event_source.S3EventSource(
            bucket, events=[s3.EventType.OBJECT_CREATED])
        lambda_app.add_event_source(s3_event_source)

        return lambda_app
Code Example #6
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here
        lambda_layer = aws_lambda.LayerVersion(
            self,
            "pillow_layer",
            code=aws_lambda.Code.asset("./python-pillow-6.2.1.zip"),
            compatible_runtimes=[aws_lambda.Runtime.PYTHON_3_7],
            description="Pillow_upload_from_cdk")
        # aws_lambda.LayerVersion.from_layer_version_arn(self, "pillow_layer",
        #                                             layer_version_arn="arn:aws-cn:lambda:cn-northwest-1:313497334385:layer:pillow:4"
        #                                             )

        # The code that defines your stack goes here
        mylambda = aws_lambda.Function(
            self,
            "myfunction_id",
            description="lambda trigger by S3 to convert image",
            # function_name="img-process-cdk-deploy",  # the name will be auto-generated
            code=aws_lambda.Code.asset("./lambda"),
            handler="lambda_function.lambda_handler",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            timeout=core.Duration.seconds(60),
            memory_size=256,
            reserved_concurrent_executions=10,
            layers=[lambda_layer],
            dead_letter_queue_enabled=True,  # CDK will create a new SQS queue for this
            # role = aws_iam.Role.from_role_arn(self, "mylambda_role",
            #         role_arn="arn:aws-cn:iam::313497334385:role/Lambda-access-img-process-S3bucket"
            #         )  # cdk will create a role for this
        )

        with open('./env.js', 'r') as f:
            env = json.load(f)
            for (k, v) in env.items():
                mylambda.add_environment(k, v)

        # Create a new bucket; an imported bucket cannot have event notifications added
        s3_bucket = aws_s3.Bucket(
            self,
            "mybucket",
            # bucket name will be auto-generated, or can be defined here
            # bucket_name="img-process-cdk-created"
        )
        mylambda.add_event_source(
            aws_lambda_event_sources.S3EventSource(
                s3_bucket,
                events=[aws_s3.EventType.OBJECT_CREATED],
                filters=[aws_s3.NotificationKeyFilter(prefix='input/')]))
        s3_bucket.grant_read_write(mylambda)
Code Example #7
    def create_trigger_training_task(self, **kwargs):
        ####################
        # Unpack values
        name = kwargs['name'].replace("_", "-")
        lambda_name = "{}-lambda".format(name)
        code_name = "{}".format(name)
        job_name = "{}-job".format(name)
        task = "{}-task".format(name)
        instance = kwargs['instance']
        image_uri = kwargs['image_uri']
        trigger_bucket = kwargs['trigger_bucket']
        input_train_bucket = kwargs['input_train_bucket']
        input_validation_bucket = kwargs['input_validation_bucket']
        # hparams = kwargs['hparams']
        output_bucket = kwargs['output_bucket']
        lambda_train_role = kwargs['lambda_role']
        sagemaker_train_role = kwargs['sagemaker_role'].role_arn

        # Create Lambda
        lambda_app = _lambda.Function(
            self,
            lambda_name,
            handler='{}.handler'.format(code_name),
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset('lambda'),
            role=lambda_train_role,
            environment={
                'INPUT_TRAIN_BUCKET': input_train_bucket,
                'INPUT_VALIDATION_BUCKET': input_validation_bucket,
                'OUTPUT_BUCKET': output_bucket,
                'NAME': name,
                'IMAGE_URI': image_uri,
                'SAGEMAKER_ROLE': sagemaker_train_role,
                # 'HPARAMS': hparams,
                'INSTANCE': instance
            })
        # Create an S3 event source for the Lambda
        bucket = s3.Bucket(self, trigger_bucket)
        s3_event_source = lambda_event_source.S3EventSource(
            bucket, events=[s3.EventType.OBJECT_CREATED])
        lambda_app.add_event_source(s3_event_source)

        return lambda_app
Code Example #8
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

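        # Bucket whose object-created events will trigger the Lambda below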
        my_s3_bucket = _s3.Bucket(self, id='myS3Bucket')

        my_sns_topic = _sns.Topic(
            self,
            id='demoTopic'
        )

        my_sns_sub = _sns_subscriptions.EmailSubscription(
            "*****@*****.**")
        # bind() alone only returns the subscription configuration;
        # add_subscription() actually attaches it to the topic
        my_sns_topic.add_subscription(my_sns_sub)

        my_function = _lambda.Function(self,
                                       id='demoFunction',
                                       code=_lambda.Code.asset(r'../src'),
                                       handler='fun01.handler')
        # Create the S3 trigger and attach it to the function
        trigger_event = _event.S3EventSource(
            bucket=my_s3_bucket, events=[_s3.EventType.OBJECT_CREATED])
        my_function.add_event_source(trigger_event)
Code Example #9
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        input_bucket = s3.Bucket(
            self,
            "input-bucket"
        )

        metadata_table = dynamodb.Table(
            self,
            "metadata-table",
            billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST,
            partition_key=dynamodb.Attribute(name="PK",type=dynamodb.AttributeType.STRING)
        )

        dependency_layer_path = build_layer_package(
            requirement_files=[os.path.join(os.path.dirname(__file__), "..", "src", "requirements.txt")]
        )

        dependency_layer = _lambda.LayerVersion(
            self,
            "dependency-layer",
            code=_lambda.Code.from_asset(path=dependency_layer_path),
            compatible_runtimes=[_lambda.Runtime.PYTHON_3_7, _lambda.Runtime.PYTHON_3_8]
        )

        lambda_package_path = build_lambda_package(
            code_directories=[os.path.join(os.path.dirname(__file__), "..", "src")],
        )

        processing_lambda = _lambda.Function(
            self,
            "processing-lambda",
            code=_lambda.Code.from_asset(path=lambda_package_path),
            handler="src.processing_lambda.lambda_handler",
            runtime=_lambda.Runtime.PYTHON_3_8,
            timeout=core.Duration.seconds(30),
            environment={
                "METADATA_TABLE_NAME": metadata_table.table_name
            },
            layers=[dependency_layer],
            description="Triggers object recognition on an S3 object and stores the metadata",
            tracing=_lambda.Tracing.ACTIVE
        )

        # Allow Lambda to talk to Rekognition
        processing_lambda.add_to_role_policy(
            iam.PolicyStatement(
                actions=["rekognition:DetectLabels"],
                resources=["*"]
            )
        )

        # Create the S3 Event Trigger for Lambda
        processing_lambda.add_event_source(
            lambda_event_sources.S3EventSource(
                input_bucket,
                events=[s3.EventType.OBJECT_CREATED]
            )
        )

        input_bucket.grant_read(processing_lambda)
        metadata_table.grant_read_write_data(processing_lambda)
Code Example #10
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        """
        Initialize function for CDK.
        """
        super().__init__(scope, construct_id, **kwargs)

        # -------------------------------
        # S3 Bucket for Manifests
        # -------------------------------

        qs_gov_bucket = s3.Bucket(
            self,
            id=f"{cf.PROJECT}-ManifestBucket",
        )
        bucket_name = qs_gov_bucket.bucket_name

        # -------------------------------
        # IAM
        # -------------------------------

        list_roles_policy = iam.ManagedPolicy(
            self,
            id=f"{cf.PROJECT}-ListRolesPolicy",
            description=None,
            managed_policy_name=None,
            path="/",
            statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                    actions=["iam:ListRoles", "iam:ListAccountAliases"],
                )
            ],
        )

        federated_quicksight_policy = iam.ManagedPolicy(
            self,
            id=f"{cf.PROJECT}-FederatedQuickSightPolicy",
            managed_policy_name=f"{cf.PROJECT}-FederatedQuickSightPolicy",
            statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    resources=[
                        f"arn:aws:iam::{cf.ACCOUNT}:saml-provider/{cf.OKTA_IDP_NAME}"
                    ],
                    actions=["sts:AssumeRoleWithSAML"],
                    conditions={
                        "StringEquals": {
                            "saml:aud": "https://signin.aws.amazon.com/saml"
                        }
                    },
                )
            ],
        )

        okta_federated_principal = iam.FederatedPrincipal(
            federated=
            f"arn:aws:iam::{cf.ACCOUNT}:saml-provider/{cf.OKTA_IDP_NAME}",
            assume_role_action="sts:AssumeRoleWithSAML",
            conditions={
                "StringEquals": {
                    "SAML:aud": "https://signin.aws.amazon.com/saml"
                }
            },
        )

        federated_quicksight_role = iam.Role(
            self,
            id=f"{cf.PROJECT}-{cf.OKTA_ROLE_NAME}",
            role_name=f"{cf.PROJECT}-{cf.OKTA_ROLE_NAME}",
            assumed_by=okta_federated_principal,
            description=
            "Allow Okta to Federate Login & User Creation to QuickSight",
            managed_policies=[federated_quicksight_policy],
        )

        iam.User(
            self,
            id=f"{cf.PROJECT}-OktaSSOUser",
            user_name=f"{cf.PROJECT}-OktaSSOUser",
            managed_policies=[list_roles_policy],
        )

        # -------------------------------
        # Lambda Functions
        # -------------------------------

        # iam role for Lambdas

        qs_governance_policy = iam.ManagedPolicy(
            self,
            id=f"{cf.PROJECT}-QuickSightGovernancePolicy",
            managed_policy_name=f"{cf.PROJECT}-QuickSightGovernancePolicy",
            statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    resources=[
                        f"arn:aws:secretsmanager:{cf.REGION}:{cf.ACCOUNT}:secret:{cf.OKTA_SECRET}*"
                    ],
                    actions=[
                        "secretsmanager:GetSecretValue",
                    ],
                ),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                    actions=["quicksight:*", "ds:*"],
                ),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    resources=[f"arn:aws:s3:::{bucket_name}/*"],
                    actions=["s3:Get*", "s3:Put*"],
                ),
            ],
        )

        quicksight_permission_mapping_role = iam.Role(
            self,
            id=f"{cf.PROJECT}-QuickSightPermissionMappingRole",
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AWSLambdaBasicExecutionRole"),
                qs_governance_policy,
            ],
        )

        # Lambdas

        get_okta_info_lambda = _lambda.Function(
            self,
            id=f"{cf.PROJECT}-GetOktaInfo",
            handler="get_okta_info.handler",
            role=quicksight_permission_mapping_role,
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.asset(os.path.join(cf.PATH_SRC, "pkg")),
            function_name=f"{cf.PROJECT}-GetOktaInfo",
            environment={
                "OKTA_SECRET": cf.OKTA_SECRET,
                "OKTA_ROLE_NAME": cf.OKTA_ROLE_NAME,
                "QS_GOVERNANCE_BUCKET": bucket_name,
                "QS_USER_GOVERNANCE_KEY": cf.QS_USER_GOVERNANCE_KEY,
            },
            memory_size=256,
            timeout=core.Duration.seconds(180),
        )

        # Lambda Okta-to-QuickSight mappers

        qs_user_governance_lambda = _lambda.Function(
            self,
            id=f"{cf.PROJECT}-QSUserGovernance",
            handler="qs_user_gov.handler",
            role=quicksight_permission_mapping_role,
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.asset(os.path.join(cf.PATH_SRC, "pkg")),
            function_name=f"{cf.PROJECT}-QSUserGovernance",
            environment={
                "OKTA_ROLE_NAME": f"{cf.PROJECT}-{cf.OKTA_ROLE_NAME}",
                "QS_GOVERNANCE_BUCKET": bucket_name,
                "QS_USER_GOVERNANCE_KEY": cf.QS_USER_GOVERNANCE_KEY,
                "OKTA_GROUP_QS_PREFIX": cf.OKTA_GROUP_QS_PREFIX,
                "QS_ADMIN_OKTA_GROUP": cf.QS_ADMIN_OKTA_GROUP,
                "QS_AUTHOR_OKTA_GROUP": cf.QS_AUTHOR_OKTA_GROUP,
                "QS_READER_OKTA_GROUP": cf.QS_READER_OKTA_GROUP
            },
            memory_size=256,
            timeout=core.Duration.seconds(180),
        )

        qs_asset_governance_lambda = _lambda.Function(
            self,
            id=f"{cf.PROJECT}-QSAssetGovernance",
            handler="qs_asset_gov.handler",
            role=quicksight_permission_mapping_role,
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.asset(os.path.join(cf.PATH_SRC, "pkg")),
            function_name=f"{cf.PROJECT}-QSAssetGovernance",
            environment={
                "QS_GOVERNANCE_BUCKET": bucket_name,
                "QS_ASSET_GOVERNANCE_KEY": cf.QS_ASSET_GOVERNANCE_KEY,
            },
            memory_size=256,
            timeout=core.Duration.seconds(180),
        )

        # -------------------------------
        # Events
        # -------------------------------

        qs_user_governance_lambda.add_event_source(
            lambda_event_sources.S3EventSource(
                bucket=qs_gov_bucket,
                events=[s3.EventType.OBJECT_CREATED],
                filters=[
                    s3.NotificationKeyFilter(prefix=cf.QS_USER_GOVERNANCE_KEY)
                ],
            ))

        qs_asset_governance_lambda.add_event_source(
            lambda_event_sources.S3EventSource(
                bucket=qs_gov_bucket,
                events=[s3.EventType.OBJECT_CREATED],
                filters=[
                    s3.NotificationKeyFilter(prefix=cf.QS_ASSET_GOVERNANCE_KEY)
                ],
            ))

        lambda_schedule = events.Schedule.rate(core.Duration.days(1))
        get_okta_info_target = events_targets.LambdaFunction(
            handler=get_okta_info_lambda)
        events.Rule(
            self,
            id=f"{cf.PROJECT}-GetOktaInfoScheduledEvent",
            description=
            "The once per day CloudWatch event trigger for the Lambda",
            enabled=True,
            schedule=lambda_schedule,
            targets=[get_okta_info_target],
        )

        # -------------------------------
        # S3 Object Deployment - QS Asset Manifest
        # -------------------------------

        asset_manifest_deploy = s3_deploy.BucketDeployment(
            self,
            id=f"{cf.PROJECT}-AssetManifestDeploy",
            sources=[
                s3_deploy.Source.asset(os.path.join(cf.PATH_ROOT, 'qs_config'))
            ],
            destination_bucket=qs_gov_bucket)
Code Example #11
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # ------ Necessary Roles ------
        roles = IamRole(
            self, 'IamRoles'
        )
        

        # ------ S3 Buckets ------
        # Create Athena bucket
        athena_bucket = _s3.Bucket(self, "AthenaBucket",
            removal_policy=core.RemovalPolicy.DESTROY
        )
        # Create Forecast bucket
        forecast_bucket = _s3.Bucket(self, "FoecastBucket",
            removal_policy=core.RemovalPolicy.DESTROY
        )


        # ------ Athena ------ 
        # Config Athena query result output location
        workgroup_prop = _athena.CfnWorkGroup.WorkGroupConfigurationProperty(
            result_configuration=_athena.CfnWorkGroup.ResultConfigurationProperty(
                output_location="s3://"+athena_bucket.bucket_name
            )
        )
        # Create Athena workgroup
        athena_workgroup = _athena.CfnWorkGroup(
            self, 'ForecastGroup',
            name='ForecastGroup', 
            recursive_delete_option=True, 
            state='ENABLED', 
            work_group_configuration=workgroup_prop
        )
            
    
        # ------ SNS Topic ------
        topic = sns.Topic(
            self, 'NotificationTopic',
            display_name='StepsTopic'
        )
        # SNS email subscription. Get the email address from the context value in cdk.json
        topic.add_subscription(subs.EmailSubscription(self.node.try_get_context('my_email')))
         

        # ------ Layers ------
        shared_layer = _lambda.LayerVersion(
            self, 'LambdaLayer',
            layer_version_name='testfolderlayer',
            code=_lambda.AssetCode('shared/')
        )


        # ------ Lambdas for Step Functions ------
        create_dataset_lambda = _lambda.Function(
            self, 'CreateDataset',
            function_name='CreateDataset',
            code=_lambda.Code.asset('lambdas/createdataset/'),
            handler='dataset.lambda_handler',
            runtime=_lambda.Runtime.PYTHON_3_7,
            role=roles.lambda_role,
            timeout=core.Duration.seconds(30),
            layers=[shared_layer]
        )

        create_dataset_group_lambda = _lambda.Function(
            self, 'CreateDatasetGroup',
            function_name='CreateDatasetGroup',
            code = _lambda.Code.asset('lambdas/createdatasetgroup/'),
            handler = 'datasetgroup.lambda_handler',
            runtime = _lambda.Runtime.PYTHON_3_7,
            role=roles.lambda_role,
            layers=[shared_layer]
        )

        import_data_lambda = _lambda.Function(
            self, 'CreateDatasetImportJob',
            function_name='CreateDatasetImportJob',
            code = _lambda.Code.asset('lambdas/createdatasetimportjob/'),
            handler = 'datasetimport.lambda_handler',
            runtime = _lambda.Runtime.PYTHON_3_7,
            role=roles.lambda_role,
            environment= {
                'FORECAST_ROLE': roles.forecast_role.role_arn
            },
            layers=[shared_layer]
        )

        create_predictor_lambda = _lambda.Function(
            self, 'CreatePredictor',
            function_name='CreatePredictor',
            code = _lambda.Code.asset('lambdas/createpredictor/'),
            handler = 'predictor.lambda_handler',
            runtime = _lambda.Runtime.PYTHON_3_7,
            role=roles.lambda_role,
            layers=[shared_layer]
        )

        create_forecast_lambda = _lambda.Function(
            self, 'CreateForecast',
            function_name='CreateForecast',
            code = _lambda.Code.asset('lambdas/createforecast/'),
            handler = 'forecast.lambda_handler',
            runtime = _lambda.Runtime.PYTHON_3_7,
            role=roles.lambda_role,
            environment= {
                'EXPORT_ROLE': roles.forecast_role.role_arn
            },
            layers=[shared_layer],
            timeout=core.Duration.seconds(30)
        )

        # Deploy lambda with python dependencies from requirements.txt
        update_resources_lambda = _lambda_python.PythonFunction(
            self, 'UpdateResources',
            function_name='UpdateResources',
            entry='lambdas/updateresources/',
            index='update.py',
            handler='lambda_handler',
            runtime = _lambda.Runtime.PYTHON_3_7,
            role=roles.update_role,
            environment= {
                'ATHENA_WORKGROUP': athena_workgroup.name,
                'ATHENA_BUCKET' : athena_bucket.bucket_name
            },
            layers=[shared_layer],
            timeout=core.Duration.seconds(900)
        )
        

        notify_lambda = _lambda.Function(
            self, 'NotifyTopic',
            function_name='NotifyTopic',
            code = _lambda.Code.asset('lambdas/notify/'),
            handler = 'notify.lambda_handler',
            runtime = _lambda.Runtime.PYTHON_3_7,
            role=roles.lambda_role,
            environment= {
                'SNS_TOPIC_ARN': topic.topic_arn
            },
            layers=[shared_layer]
        )

        delete_forecast_lambda = _lambda.Function(
            self, 'DeleteForecast',
            function_name='DeleteForecast',
            code = _lambda.Code.asset('lambdas/deleteforecast/'),
            handler = 'deleteforecast.lambda_handler',
            runtime = _lambda.Runtime.PYTHON_3_7,
            role=roles.lambda_role,
            layers=[shared_layer]
        )

        delete_predictor_lambda = _lambda.Function(
            self, 'DeletePredictor',
            function_name='DeletePredictor',
            code = _lambda.Code.asset('lambdas/deletepredictor/'),
            handler = 'deletepredictor.lambda_handler',
            runtime = _lambda.Runtime.PYTHON_3_7,
            role=roles.lambda_role,
            layers=[shared_layer]
        )

        delete_importjob_lambda = _lambda.Function(
            self, 'DeleteImportJob',
            function_name='DeleteImportJob',
            code = _lambda.Code.asset('lambdas/deletedatasetimport/'),
            handler = 'deletedataset.lambda_handler',
            runtime = _lambda.Runtime.PYTHON_3_7,
            role=roles.lambda_role,
            layers=[shared_layer]
        )


        # ------ StepFunctions ------
        strategy_choice = sfn.Choice(
            self, 'Strategy-Choice'
        )

        success_state = sfn.Succeed(
            self, 'SuccessState'
        )

        failed = sfn_tasks.LambdaInvoke(
            self, 'Failed',
            lambda_function = notify_lambda,
            result_path=None
        ).next(strategy_choice)

        create_dataset_job = sfn_tasks.LambdaInvoke(
            self, 'Create-Dataset', 
            lambda_function = create_dataset_lambda,
            retry_on_service_exceptions=True,
            payload_response_only=True
        )

        self.add_retry_n_catch(create_dataset_job, failed)

        create_dataset_group_job = sfn_tasks.LambdaInvoke(
            self, 'Create-DatasetGroup', 
            lambda_function = create_dataset_group_lambda,
            payload_response_only=True
        )
        self.add_retry_n_catch(create_dataset_group_job, failed)


        import_data_job = sfn_tasks.LambdaInvoke(
            self, 'Import-Data',
            lambda_function = import_data_lambda,
            payload_response_only=True
        )
        self.add_retry_n_catch(import_data_job, failed)

        create_predictor_job = sfn_tasks.LambdaInvoke(
            self, 'Create-Predictor',
            lambda_function = create_predictor_lambda,
            payload_response_only=True
        )
        self.add_retry_n_catch(create_predictor_job, failed)

        create_forecast_job = sfn_tasks.LambdaInvoke(
            self, 'Create-Forecast',
            lambda_function = create_forecast_lambda,
            payload_response_only=True
        )
        self.add_retry_n_catch(create_forecast_job, failed)

        update_resources_job = sfn_tasks.LambdaInvoke(
            self, 'Update-Resources',
            lambda_function = update_resources_lambda,
            payload_response_only=True
        )
        self.add_retry_n_catch(update_resources_job, failed)

        notify_success = sfn_tasks.LambdaInvoke(
            self, 'Notify-Success',
            lambda_function = notify_lambda,
            payload_response_only=True
        )

        delete_forecast_job = sfn_tasks.LambdaInvoke(
            self, 'Delete-Forecast',
            lambda_function = delete_forecast_lambda,
            payload_response_only=True
        )
        self.delete_retry(delete_forecast_job)

        delete_predictor_job = sfn_tasks.LambdaInvoke(
            self, 'Delete-Predictor',
            lambda_function = delete_predictor_lambda,
            payload_response_only=True
        )
        self.delete_retry(delete_predictor_job)

        delete_import_job = sfn_tasks.LambdaInvoke(
            self, 'Delete-ImportJob',
            lambda_function = delete_importjob_lambda,
            payload_response_only=True
        )
        self.delete_retry(delete_import_job)
        
        
        definition = create_dataset_job\
            .next(create_dataset_group_job)\
            .next(import_data_job)\
            .next(create_predictor_job)\
            .next(create_forecast_job)\
            .next(update_resources_job)\
            .next(notify_success)\
            .next(strategy_choice.when(sfn.Condition.boolean_equals('$.params.PerformDelete', False), success_state)\
                                .otherwise(delete_forecast_job).afterwards())\
            .next(delete_predictor_job)\
            .next(delete_import_job)
                    
            
        deploy_state_machine = sfn.StateMachine(
            self, 'StateMachine',
            definition = definition
            # role=roles.states_execution_role
        )

        # S3 event trigger lambda
        s3_lambda = _lambda.Function(
            self, 'S3Lambda',
            function_name='S3Lambda',
            code=_lambda.Code.asset('lambdas/s3lambda/'),
            handler='parse.lambda_handler',
            runtime=_lambda.Runtime.PYTHON_3_7,
            role=roles.trigger_role,
            environment= {
                'STEP_FUNCTIONS_ARN': deploy_state_machine.state_machine_arn,
                'PARAMS_FILE': self.node.try_get_context('parameter_file')
            }
        )
        s3_lambda.add_event_source(
            event_src.S3EventSource(
                bucket=forecast_bucket,
                events=[_s3.EventType.OBJECT_CREATED],
                filters=[_s3.NotificationKeyFilter(
                    prefix='train/',
                    suffix='.csv'
                )]
            )
        )

        # CloudFormation output
        core.CfnOutput(
            self, 'StepFunctionsName',
            description='Step Functions Name',
            value=deploy_state_machine.state_machine_name
        )

        core.CfnOutput(
            self, 'ForecastBucketName',
            description='Forecast bucket name to drop your files',
            value=forecast_bucket.bucket_name
        )

        core.CfnOutput(
            self, 'AthenaBucketName',
            description='Athena bucket name to drop your files',
            value=athena_bucket.bucket_name
        )
Code Example #12
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

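        # Service roles for Batch, ECS tasks, and Lambda, each granted AdministratorAccess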
        stack_role = iam.Role(
            self,
            "SimulationServiceRole",
            assumed_by=iam.ServicePrincipal("batch.amazonaws.com"),
        )

        stack_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AdministratorAccess"))

        job_role = iam.Role(
            self,
            "SimulationJobServiceRole",
            assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
        )

        job_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AdministratorAccess"))

        lambda_role = iam.Role(
            self,
            "SimulationLambdaServiceRole",
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
        )

        lambda_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AdministratorAccess"))

        # Create Input S3
        input_bucket = s3.Bucket(self, "InputS3Bucket")

        # Create Output S3
        output_bucket = s3.Bucket(self, "OutputS3Bucket")

        # admin_policy = iam.from_policy_name("AdministratorAccess", "AdministratorAccess")

        job_table = aws_dynamodb.Table(
            self,
            id="JobTable",
            partition_key=aws_dynamodb.Attribute(
                name="PK", type=aws_dynamodb.AttributeType.STRING),
            stream=aws_dynamodb.StreamViewType.NEW_AND_OLD_IMAGES,
            removal_policy=core.RemovalPolicy.DESTROY,
        )

        orchestration_handler_lambda = aws_lambda.Function(
            self,
            id="JobOrchestrationHandler",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            handler="orchestration_handler_lambda.handler",
            code=aws_lambda.Code.asset("./simulations_service/functions/"),
        )

        # Give only write access to the post handler
        job_table.grant_write_data(orchestration_handler_lambda)

        # Pass table_name as env variable
        orchestration_handler_lambda.add_environment("TABLE_NAME",
                                                     job_table.table_name)

        # Create lambda function for processing dynamodb streams
        dynamodb_streams_processor_lambda = aws_lambda.Function(
            self,
            id="JobsDynamoDBStreamsProcessor",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            handler="dynamodb_streams_processor_lambda.handler",
            code=aws_lambda.Code.asset("./simulations_service/functions/"),
            role=lambda_role,
        )

        # Add dynamo db as lambda event source
        dynamodb_streams_processor_lambda.add_event_source(
            aws_lambda_event_sources.DynamoEventSource(
                job_table,
                starting_position=aws_lambda.StartingPosition.LATEST,
                batch_size=1,
            ))

        dynamodb_streams_processor_lambda.add_environment(
            "S3_OUTPUT_BUCKET", output_bucket.bucket_name)

        dynamodb_streams_processor_lambda.add_environment(
            "TABLE_NAME", job_table.table_name)

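        # VPC and AWS Batch resources that will run the simulation jobs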
        vpc = ec2.Vpc(self, "VPC")

        spot_environment = batch.ComputeEnvironment(
            self,
            "MyComputedEnvironment",
            compute_resources={
                "vpc": vpc,
            },
            service_role=stack_role.without_policy_updates(),
        )

        job_queue = batch.JobQueue(
            self,
            "JobQueue",
            compute_environments=[
                batch.JobQueueComputeEnvironment(
                    compute_environment=spot_environment, order=1)
            ],
        )

        dynamodb_streams_processor_lambda.add_environment(
            "JOB_QUEUE", job_queue.job_queue_name)

        job_definition = batch.JobDefinition(
            self,
            "batch-job-def-from-local",
            container={
                "image":
                ecs.ContainerImage.from_asset("./simulations_service/job/"),
                "memory_limit_mib":
                500,
                "privileged":
                True,
                "job_role":
                job_role,
            },
        )

        dynamodb_streams_processor_lambda.add_environment(
            "JOB_DEFINITION", job_definition.job_definition_name)

        orchestration_handler_lambda.add_event_source(
            aws_lambda_event_sources.S3EventSource(
                bucket=input_bucket,
                events=[s3.EventType.OBJECT_CREATED],
            ))
Code Example #13
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

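        # Two buckets: one publishes object-created events to an SNS topic,
        # the other triggers the measuring Lambda directly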
        bucket_with_sns = s3.Bucket(self,
                                    "bucket-with-sns-integration",
                                    removal_policy=core.RemovalPolicy.DESTROY)

        bucket_with_lambda = s3.Bucket(
            self,
            "bucket-with-lambda-integration",
            removal_policy=core.RemovalPolicy.DESTROY)

        exchange_topic = sns.Topic(self, "lambda-info-topic")

        bucket_with_sns.add_event_notification(
            event=s3.EventType.OBJECT_CREATED,
            dest=s3_notifications.SnsDestination(exchange_topic))

        measurement_table = dynamodb.Table(
            self,
            "measurement-table",
            partition_key=dynamodb.Attribute(
                name="PK", type=dynamodb.AttributeType.STRING),
            sort_key=dynamodb.Attribute(name="SK",
                                        type=dynamodb.AttributeType.STRING),
            billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST,
            removal_policy=core.RemovalPolicy.DESTROY)

        s3_event_generator = _lambda.Function(
            self,
            "s3-event-generator",
            code=_lambda.Code.from_asset(
                path=os.path.join(os.path.dirname(__file__), "..", "src")),
            environment={
                "BUCKET_WITH_LAMBDA": bucket_with_lambda.bucket_name,
                "BUCKET_WITH_SNS": bucket_with_sns.bucket_name,
            },
            handler="s3_event_generator.lambda_handler",
            runtime=_lambda.Runtime.PYTHON_3_8,
            timeout=core.Duration.seconds(300),
            memory_size=1024,
        )

        bucket_with_lambda.grant_write(s3_event_generator)
        bucket_with_sns.grant_write(s3_event_generator)

        measure_lambda = _lambda.Function(
            self,
            "measure-lambda",
            code=_lambda.Code.from_asset(
                path=os.path.join(os.path.dirname(__file__), "..", "src")),
            environment={"MEASUREMENT_TABLE": measurement_table.table_name},
            handler="measure_lambda.lambda_handler",
            runtime=_lambda.Runtime.PYTHON_3_8,
            timeout=core.Duration.seconds(50),
            memory_size=1024,
        )

        measurement_table.grant_read_write_data(measure_lambda)

        measure_lambda.add_event_source(
            lambda_event_sources.SnsEventSource(exchange_topic))

        measure_lambda.add_event_source(
            lambda_event_sources.S3EventSource(
                bucket=bucket_with_lambda,
                events=[s3.EventType.OBJECT_CREATED]))

        ssm.StringParameter(self,
                            "bucket-with-sns-parameter",
                            string_value=bucket_with_sns.bucket_name,
                            parameter_name=BUCKET_WITH_SNS_PARAMETER)

        ssm.StringParameter(self,
                            "bucket-with-lambda-parameter",
                            string_value=bucket_with_lambda.bucket_name,
                            parameter_name=BUCKET_WITH_LAMBDA_PARAMETER)

        ssm.StringParameter(self,
                            "measurement-table-parameter",
                            string_value=measurement_table.table_name,
                            parameter_name=MEASUREMENT_TABLE_PARAMETER)

        ssm.StringParameter(self,
                            "generator-function-name-parameter",
                            string_value=s3_event_generator.function_name,
                            parameter_name=GENERATOR_FUNCTION_NAME_PARAMETER)
Code Example #14
File: app.py Project: tzickel/awsdemo
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # TODO should this stuff be passed as inputs to the stack ?
        source_code_directory = "/opt/python"
        # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/Cluster.html#aws_cdk.aws_ecs.Cluster.add_capacity
        asg_parameters = {
            "instance_type":
            ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2,
                                ec2.InstanceSize.MICRO),
            "machine_image":
            ecs.EcsOptimizedImage.amazon_linux2(),
            "desired_capacity":
            0,
            "max_capacity":
            5,
            "min_capacity":
            0,
        }
        # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/Ec2TaskDefinition.html#aws_cdk.aws_ecs.Ec2TaskDefinition.add_container
        container_settings = {
            "memory_limit_mib": 300,
            "logging": ecs.AwsLogDriver(stream_prefix="ecslogs"),
        }
        input_bucket_name = "cdkdemoinput"
        output_bucket_name = "cdkdemooutput"

        # Create a Docker image from a given directory, which will later be published to Amazon ECR
        # TODO can this be cleanup on destroy as well ?
        container_image = ecs.ContainerImage.from_asset(
            directory=source_code_directory)

        # Create an Amazon ECS cluster
        cluster = ecs.Cluster(self, "ecscluster")
        cluster.apply_removal_policy(cdk.RemovalPolicy.DESTROY)

        # Create an auto scaling group for the ECS cluster
        asg = cluster.add_capacity("ecsautoscalinggroup", **asg_parameters)
        # TODO check if needed
        asg.apply_removal_policy(cdk.RemovalPolicy.DESTROY)

        # Create a capacity provider for the ECS cluster based on the auto scaling group
        capacity_provider = ecs.CfnCapacityProvider(
            self,
            "ecscapacityprovider",
            # Name can't start with ecs...
            name="capacityproviderecs",
            auto_scaling_group_provider=ecs.CfnCapacityProvider.
            AutoScalingGroupProviderProperty(
                auto_scaling_group_arn=asg.auto_scaling_group_name,
                managed_scaling=ecs.CfnCapacityProvider.ManagedScalingProperty(
                    status="ENABLED"),
                # TODO investigate this better
                managed_termination_protection="DISABLED",
            ),
        )
        capacity_provider.apply_removal_policy(cdk.RemovalPolicy.DESTROY)

        # Currently the CDK checks if the string is FARGATE or FARGATE_SPOT and errors out
        # cluster.add_capacity_provider(capacity_provider.name)
        lame_hack = cr.AwsCustomResource(
            self,
            "lamehack",
            on_create={
                "service":
                "ECS",
                "action":
                "putClusterCapacityProviders",
                "parameters": {
                    "cluster": cluster.cluster_arn,
                    "capacityProviders": [capacity_provider.name],
                    "defaultCapacityProviderStrategy": [],
                },
                "physical_resource_id":
                cr.PhysicalResourceId.of(str(int(time.time()))),
            },
            on_delete={
                "service": "ECS",
                "action": "putClusterCapacityProviders",
                "parameters": {
                    "cluster": cluster.cluster_arn,
                    "capacityProviders": [],
                    "defaultCapacityProviderStrategy": [],
                },
            },
            # TODO lower this permissions
            policy=cr.AwsCustomResourcePolicy.from_sdk_calls(
                resources=cr.AwsCustomResourcePolicy.ANY_RESOURCE),
        )
        lame_hack.node.add_dependency(capacity_provider)
        lame_hack.node.add_dependency(cluster)

        # Create an ECS task definition with our Docker image
        task_definition = ecs.Ec2TaskDefinition(self, "ecstaskdefinition")
        container_definition = task_definition.add_container(
            "ecscontainer", image=container_image, **container_settings)
        # TODO lower this permissions
        task_definition.task_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonS3FullAccess"))
        task_definition.apply_removal_policy(cdk.RemovalPolicy.DESTROY)

        # Create the Amazon S3 input and output buckets
        input_bucket = s3.Bucket(
            self,
            "bucketinput",
            bucket_name=input_bucket_name,
            versioned=False,
            removal_policy=cdk.RemovalPolicy.DESTROY,
            auto_delete_objects=True,
        )

        output_bucket = s3.Bucket(
            self,
            "bucketoutput",
            bucket_name=output_bucket_name,
            versioned=False,
            removal_policy=cdk.RemovalPolicy.DESTROY,
            auto_delete_objects=True,
        )

        # Create the AWS Lambda function that transforms bucket event information into container inputs
        function = lambda_.Function(
            self,
            "inputlambda",
            code=lambda_.Code.from_inline(lambda_function_code),
            handler="index.lambda_handler",
            runtime=lambda_.Runtime.PYTHON_3_8,
            environment={
                "CAPACITY_PROVIDER_NAME": capacity_provider.name,
                "CLUSTER_NAME": cluster.cluster_arn,
                "CONTAINER_NAME": container_definition.container_name,
                "REGION_NAME": self.region,
                # TODO flaky, why can't we pass the ARN directly ?
                "OUTPUT_BUCKET_NAME": output_bucket.bucket_name,
                "TASK_DEFINITION": task_definition.task_definition_arn,
            },
        )
        # Add an S3 object creation trigger for the function
        function.add_event_source(
            lambda_event_sources.S3EventSource(
                input_bucket, events=[s3.EventType.OBJECT_CREATED]))
        # TODO fix this for less permissions
        function.role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonECS_FullAccess"))
        function.apply_removal_policy(cdk.RemovalPolicy.DESTROY)
Code Example #15
    def __init__(self, scope: core.Construct, id: str, instance_id: str,
                 contact_flow_id: str, source_phone_number: str, timeout: int,
                 **kwargs):
        super().__init__(scope, id, **kwargs)

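        # Static website bucket, fronted below by a CloudFront distribution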
        web_bucket = _s3.Bucket(self,
                                "StaticWebBucket",
                                website_index_document="index.html",
                                website_error_document="index.html",
                                removal_policy=core.RemovalPolicy.DESTROY,
                                public_read_access=True)

        core.CfnOutput(self,
                       'WebBucketUrl',
                       value=web_bucket.bucket_domain_name)

        web_distribution = _clf.CloudFrontWebDistribution(
            self,
            'StaticWebDistribution',
            origin_configs=[
                _clf.SourceConfiguration(
                    s3_origin_source=_clf.S3OriginConfig(
                        s3_bucket_source=web_bucket),
                    behaviors=[_clf.Behavior(is_default_behavior=True)])
            ],
            viewer_protocol_policy=_clf.ViewerProtocolPolicy.REDIRECT_TO_HTTPS)

        _s3d.BucketDeployment(
            self,
            "S3StaticWebContentDeploymentWithInvalidation",
            sources=[
                _s3d.Source.asset(
                    f"{pathlib.Path(__file__).parent.absolute()}/site-content/build"
                )
            ],
            destination_bucket=web_bucket,
            distribution=web_distribution,
            distribution_paths=["/*"])

        file_bucket = _s3.Bucket(self,
                                 "FileBucket",
                                 removal_policy=core.RemovalPolicy.DESTROY)

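        # FIFO call queues, each with a FIFO dead-letter queue that receives
        # messages after a single failed delivery (max_receive_count=1)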
        call_dead_letter_queue = _sqs.Queue(self,
                                            "CallDeadLetterQueue",
                                            fifo=True,
                                            content_based_deduplication=True)

        call_sqs_queue = _sqs.Queue(
            self,
            "CallSqsQueue",
            fifo=True,
            content_based_deduplication=True,
            visibility_timeout=core.Duration.seconds(120),
            dead_letter_queue=_sqs.DeadLetterQueue(
                max_receive_count=1, queue=call_dead_letter_queue))

        async_call_dead_letter_queue = _sqs.Queue(
            self,
            "AsyncCallDeadLetterQueue",
            fifo=True,
            content_based_deduplication=True)

        async_callout_queue = _sqs.Queue(
            self,
            "AsyncCalloutQueue",
            fifo=True,
            content_based_deduplication=True,
            visibility_timeout=core.Duration.seconds(120),
            dead_letter_queue=_sqs.DeadLetterQueue(
                max_receive_count=1, queue=async_call_dead_letter_queue))

        call_job_complete_sns_topic = _sns.Topic(
            self, "CallJobCompleteSnsTopic", display_name="CallJobCompletion")

        call_result_table = _dynamodb.Table(
            self,
            "CallResultDynamodbTable",
            billing_mode=_dynamodb.BillingMode.PAY_PER_REQUEST,
            partition_key=_dynamodb.Attribute(
                name="task_id", type=_dynamodb.AttributeType.STRING),
            sort_key=_dynamodb.Attribute(name="receiver_id",
                                         type=_dynamodb.AttributeType.STRING),
            removal_policy=core.RemovalPolicy.DESTROY)

        callout_record_table = _dynamodb.Table(
            self,
            "CallTaskDynamodbTable",
            billing_mode=_dynamodb.BillingMode.PAY_PER_REQUEST,
            partition_key=_dynamodb.Attribute(
                name="task_id", type=_dynamodb.AttributeType.STRING),
            sort_key=_dynamodb.Attribute(name="created_at",
                                         type=_dynamodb.AttributeType.NUMBER),
            removal_policy=core.RemovalPolicy.DESTROY)
        callout_record_table.add_global_secondary_index(
            partition_key=_dynamodb.Attribute(
                name='call_type', type=_dynamodb.AttributeType.STRING),
            sort_key=_dynamodb.Attribute(name='created_at',
                                         type=_dynamodb.AttributeType.NUMBER),
            index_name='CallTypeCreatedAtGlobalIndex',
            projection_type=_dynamodb.ProjectionType.ALL)

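        # Shared dependency layers for the Python and Node.js Lambda functions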
        python_function_layer = _lambda.LayerVersion(
            self,
            "LambdaPythonFunctionLayer",
            code=_lambda.Code.asset("aws_callouts_cdk/layer/_python"),
            compatible_runtimes=[
                _lambda.Runtime.PYTHON_3_7, _lambda.Runtime.PYTHON_3_8
            ],
            license="Available under the MIT-0 license")

        nodejs_function_layer = _lambda.LayerVersion(
            self,
            "LambdaNodeJsFunctionLayer",
            code=_lambda.Code.asset("aws_callouts_cdk/layer/_nodejs"),
            compatible_runtimes=[
                _lambda.Runtime.NODEJS_10_X, _lambda.Runtime.NODEJS_12_X
            ],
            license="Available under the MIT-0 license")

        global_python_function_arguments = {
            "code": _lambda.Code.asset("aws_callouts_cdk/src/python"),
            "layers": [python_function_layer],
            "runtime": _lambda.Runtime.PYTHON_3_7
        }

        global_nodejs_function_arguments = {
            "code": _lambda.Code.asset("aws_callouts_cdk/src/nodejs"),
            "layers": [nodejs_function_layer],
            "runtime": _lambda.Runtime.NODEJS_12_X
        }

        get_callout_job_function = _lambda.Function(
            self,
            "GetCalloutJobFunction",
            handler="get_call_job.lambda_handler",
            **global_python_function_arguments)
        get_callout_job_function.add_environment(key="S3Bucket",
                                                 value=file_bucket.bucket_name)
        file_bucket.grant_read(get_callout_job_function)

        callout_function = _lambda.Function(self,
                                            "CalloutFunction",
                                            handler="send_call.lambda_handler",
                                            **global_python_function_arguments)
        callout_function.add_environment(
            key="ContactFlowArn",
            value=
            f"arn:aws:connect:{self.region}:{self.account}:instance/{instance_id}/contact-flow/{contact_flow_id}"
        )
        callout_function.add_environment(key="SourcePhoneNumber",
                                         value=source_phone_number)
        callout_function.add_environment(key="ExcelFileBucket",
                                         value=file_bucket.bucket_name)
        callout_function.add_environment(key="AsynCalloutQueueUrl",
                                         value=async_callout_queue.queue_url)
        callout_function.add_to_role_policy(statement=_iam.PolicyStatement(
            resources=[
                f"arn:aws:connect:{self.region}:{self.account}:instance/{instance_id}/*"
            ],
            actions=["connect:StartOutboundVoiceContact"]))
        callout_function.add_event_source(source=_les.SqsEventSource(
            queue=async_callout_queue, batch_size=1))
        file_bucket.grant_read_write(callout_function)

        response_handler_function = _lambda.Function(
            self,
            "ResponseHandlerFunction",
            handler="response_handler.lambda_handler",
            **global_python_function_arguments)
        response_handler_function.add_permission(
            id="ResponseHandlerFunctionLambdaInvokePermission",
            principal=_iam.ServicePrincipal(service="connect.amazonaws.com"),
            action="lambda:InvokeFunction",
            source_account=self.account,
            source_arn=
            f"arn:aws:connect:{self.region}:{self.account}:instance/{instance_id}"
        )

        send_task_success_function = _lambda.Function(
            self,
            "SendTaskSuccessFunction",
            handler="send_task_success.lambda_handler",
            **global_python_function_arguments)
        send_task_success_function.add_permission(
            id="SendTaskSuccessFunctionLambdaInvokePermission",
            principal=_iam.ServicePrincipal(service="connect.amazonaws.com"),
            action="lambda:InvokeFunction",
            source_account=self.account,
            source_arn=
            f"arn:aws:connect:{self.region}:{self.account}:instance/{instance_id}"
        )

        get_call_result_function = _lambda.Function(
            self,
            "GetCallResultFunction",
            handler="get_call_result.lambda_handler",
            memory_size=512,
            **global_python_function_arguments)
        get_call_result_function.add_environment(
            key="CallResultDynamoDBTable", value=call_result_table.table_name)
        get_call_result_function.add_environment(key="S3Bucket",
                                                 value=file_bucket.bucket_name)
        call_result_table.grant_read_data(grantee=get_call_result_function)
        file_bucket.grant_read_write(get_call_result_function)

        iterator_function = _lambda.Function(
            self,
            "IteratorFunction",
            handler="iterator.lambda_handler",
            **global_python_function_arguments)
        iterator_function.add_permission(
            id="IteratorFunctionLambdaInvokePermission",
            principal=_iam.ServicePrincipal(service="connect.amazonaws.com"),
            action="lambda:InvokeFunction",
            source_account=self.account,
            source_arn=
            f"arn:aws:connect:{self.region}:{self.account}:instance/{instance_id}"
        )

        create_appsync_call_task_function = _lambda.Function(
            self,
            "CreateAppSyncCallTaskFunction",
            handler="create_appsync_call_task.lambda_handler",
            **global_nodejs_function_arguments)
        create_appsync_call_task_function.add_environment(
            key="CallSqsQueueUrl", value=call_sqs_queue.queue_url)
        create_appsync_call_task_function.add_environment(
            key="CallRecordTableName", value=callout_record_table.table_name)
        call_sqs_queue.grant_send_messages(create_appsync_call_task_function)
        callout_record_table.grant_write_data(
            create_appsync_call_task_function)

        create_call_report_record_function = _lambda.Function(
            self,
            "CreateCallReportRecordFunction",
            handler="create_call_report_record.lambda_handler",
            **global_nodejs_function_arguments)

        create_excel_call_task_function = _lambda.Function(
            self,
            "CreateExcelCallTaskFunction",
            handler="create_excel_call_task.lambda_handler",
            **global_python_function_arguments)
        create_excel_call_task_function.add_environment(
            key="CallSqsQueueUrl", value=call_sqs_queue.queue_url)
        call_sqs_queue.grant_send_messages(create_excel_call_task_function)

        create_excel_call_task_function.add_event_source(
            source=_les.S3EventSource(bucket=file_bucket,
                                      events=[_s3.EventType.OBJECT_CREATED],
                                      filters=[
                                          _s3.NotificationKeyFilter(
                                              prefix="call_task",
                                              suffix=".xlsx")
                                      ]))

        start_callout_flow_function = _lambda.Function(
            self,
            "StartCalloutFlowFunction",
            handler="start_call_out_flow.lambda_handler",
            reserved_concurrent_executions=1,
            **global_python_function_arguments)
        start_callout_flow_function.add_environment(
            key="CallSqsQueueUrl", value=call_sqs_queue.queue_url)
        start_callout_flow_function.add_environment(
            key="ResponseHandlerFunctionArn",
            value=response_handler_function.function_arn)
        start_callout_flow_function.add_environment(
            key="IteratorFunctionArn", value=iterator_function.function_arn)
        start_callout_flow_function.add_environment(
            key="SendTaskSuccessFunctionArn",
            value=send_task_success_function.function_arn)
        start_callout_flow_function.add_environment(
            key="S3Bucket", value=file_bucket.bucket_name)
        start_callout_flow_function.add_event_source(
            source=_les.SqsEventSource(queue=call_sqs_queue, batch_size=1))
        file_bucket.grant_read_write(start_callout_flow_function)

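        # Amazon States Language definition: map over the batch of call
        # messages, start each call via SQS using .waitForTaskToken, persist
        # each result to DynamoDB, then report completion through SNS.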
        call_state_machine_definition = {
            "Comment":
            "Reading messages from an SQS queue and iteratively processing each message.",
            "StartAt": "Start",
            "States": {
                "Start": {
                    "Type": "Pass",
                    "Next": "Process Call Messages"
                },
                "Process Call Messages": {
                    "Type": "Map",
                    "Next": "Get Call Result",
                    "InputPath": "$",
                    "ItemsPath": "$",
                    "OutputPath": "$.[0]",
                    "Iterator": {
                        "StartAt": "Get Call out job",
                        "States": {
                            "Get Call out job": {
                                "Type": "Task",
                                "Resource":
                                get_callout_job_function.function_arn,
                                "Next": "Callout with AWS Connect"
                            },
                            "Callout with AWS Connect": {
                                "Type":
                                "Task",
                                "Resource":
                                "arn:aws:states:::sqs:sendMessage.waitForTaskToken",
                                "TimeoutSeconds":
                                timeout,
                                "Parameters": {
                                    "QueueUrl": async_callout_queue.queue_url,
                                    "MessageGroupId": "1",
                                    "MessageBody": {
                                        "Message.$": "$",
                                        "TaskToken.$": "$$.Task.Token"
                                    }
                                },
                                "Catch": [{
                                    "ErrorEquals": ["States.Timeout"],
                                    "ResultPath": None,
                                    "Next": "Call Timeout"
                                }],
                                "Next":
                                "Save call result"
                            },
                            "Call Timeout": {
                                "Type": "Pass",
                                "ResultPath": None,
                                "Next": "Save call result"
                            },
                            "Save call result": {
                                "Type": "Task",
                                "Resource":
                                "arn:aws:states:::dynamodb:putItem",
                                "Parameters": {
                                    "TableName": call_result_table.table_name,
                                    "Item": {
                                        "receiver_id": {
                                            "S.$": "$.receiver_id"
                                        },
                                        "task_id": {
                                            "S.$": "$.task_id"
                                        },
                                        "username": {
                                            "S.$": "$.username"
                                        },
                                        "phone_number": {
                                            "S.$": "$.phone_number"
                                        },
                                        "status": {
                                            "S.$": "$.status"
                                        },
                                        "answers": {
                                            "S.$": "$.answers"
                                        },
                                        "error": {
                                            "S.$": "$.error"
                                        },
                                        "call_at": {
                                            "S.$": "$.call_at"
                                        }
                                    }
                                },
                                "ResultPath": "$.Result",
                                "OutputPath": "$.task_id",
                                "End": True
                            }
                        }
                    }
                },
                "Get Call Result": {
                    "Type": "Task",
                    "Resource": get_call_result_function.function_arn,
                    "Next": "Create Call Report Record"
                },
                "Create Call Report Record": {
                    "Type": "Task",
                    "Resource":
                    create_call_report_record_function.function_arn,
                    "Next": "Send Completion message to SNS"
                },
                "Send Completion message to SNS": {
                    "Type": "Task",
                    "Resource": "arn:aws:states:::sns:publish",
                    "Parameters": {
                        "TopicArn": call_job_complete_sns_topic.topic_arn,
                        "Message.$": "$"
                    },
                    "Next": "Finish"
                },
                "Finish": {
                    "Type": "Succeed"
                }
            }
        }
        callout_state_machine_role = _iam.Role(
            self,
            "CalloutStatesExecutionRole",
            assumed_by=_iam.ServicePrincipal(
                f"states.{self.region}.amazonaws.com"))
        callout_state_machine_role.add_to_policy(
            _iam.PolicyStatement(
                actions=[
                    "sqs:SendMessage", "dynamodb:PutItem",
                    "lambda:InvokeFunction", "SNS:Publish"
                ],
                resources=[
                    async_callout_queue.queue_arn, call_result_table.table_arn,
                    get_callout_job_function.function_arn,
                    get_call_result_function.function_arn,
                    call_job_complete_sns_topic.topic_arn,
                    create_appsync_call_task_function.function_arn,
                    create_call_report_record_function.function_arn
                ]))
        callout_state_machine = _sfn.CfnStateMachine(
            self,
            "CalloutStateMachine",
            role_arn=callout_state_machine_role.role_arn,
            definition_string=json.dumps(call_state_machine_definition))
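        # For CfnStateMachine, `ref` resolves to the state machine ARN.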
        send_task_success_function.add_to_role_policy(
            _iam.PolicyStatement(actions=["states:SendTaskSuccess"],
                                 resources=[callout_state_machine.ref]))

        start_callout_flow_function.add_environment(
            key="CalloutStateMachineArn", value=callout_state_machine.ref)
        start_callout_flow_function.add_to_role_policy(
            _iam.PolicyStatement(effect=_iam.Effect.ALLOW,
                                 resources=[callout_state_machine.ref],
                                 actions=['states:StartExecution']))

        user_pool = _cognito.UserPool(
            self, "UserPool", sign_in_type=_cognito.SignInType.USERNAME)

        user_pool_client = _cognito.UserPoolClient(self,
                                                   "UserPoolClient",
                                                   user_pool=user_pool)

        appsync_api = _appsync.GraphQLApi(
            self,
            "AppSyncApi",
            name="AWSCalloutApi",
            user_pool_config=_appsync.UserPoolConfig(
                user_pool=user_pool,
                default_action=_appsync.UserPoolDefaultAction.ALLOW),
            log_config=_appsync.LogConfig(
                field_log_level=_appsync.FieldLogLevel.ALL),
            schema_definition_file=
            f"{pathlib.Path(__file__).parent.absolute()}/schema.graphql")

        callout_record_ddb_ds = appsync_api.add_dynamo_db_data_source(
            name="CalloutRecordDdb",
            description="Callout Record DynamoDB Data Source",
            table=callout_record_table)
        callout_record_ddb_ds.create_resolver(
            type_name="Query",
            field_name="getLatestCallTaskRecords",
            request_mapping_template=_appsync.MappingTemplate.from_string(
                '{"version":"2017-02-28","operation":"Query","index":"CallTypeCreatedAtGlobalIndex","query":{"expression":"call_type = :call_type","expressionValues":{":call_type":{"S":"TASK"}}},"scanIndexForward":false,"limit":${ctx.args.limit}}'
            ),
            response_mapping_template=_appsync.MappingTemplate.
            dynamo_db_result_list())
        callout_record_ddb_ds.create_resolver(
            type_name="Query",
            field_name="getLatestCallReportRecords",
            request_mapping_template=_appsync.MappingTemplate.from_string(
                '{"version":"2017-02-28","operation":"Query","index":"CallTypeCreatedAtGlobalIndex","query":{"expression":"call_type = :call_type","expressionValues":{":call_type":{"S":"REPORT"}}},"scanIndexForward":false,"limit":${ctx.args.limit}}'
            ),
            response_mapping_template=_appsync.MappingTemplate.
            dynamo_db_result_list())
        callout_record_ddb_ds.create_resolver(
            type_name="Mutation",
            field_name="createCallReport",
            request_mapping_template=_appsync.MappingTemplate.from_string(
                '{"version":"2017-02-28","operation":"PutItem","key":{"task_id":{"S":"${ctx.args.report.task_id}"},"created_at":{"N":"${ctx.args.report.created_at}"}},"attributeValues":$util.dynamodb.toMapValuesJson($ctx.args.report)}'
            ),
            response_mapping_template=_appsync.MappingTemplate.
            dynamo_db_result_item())

        call_task_lambda_ds = appsync_api.add_lambda_data_source(
            name="CallTaskLambda",
            description="Call Task Lambda Data Source",
            lambda_function=create_appsync_call_task_function)
        call_task_lambda_ds.create_resolver(
            type_name="Mutation",
            field_name="createCallTask",
            request_mapping_template=_appsync.MappingTemplate.lambda_request(
                "$utils.toJson($ctx.args)"),
            response_mapping_template=_appsync.MappingTemplate.lambda_result())

        create_call_report_record_function.add_environment(
            value=appsync_api.graph_ql_url, key="AppSyncGraphQlApiUrl")

        create_call_report_record_function.add_to_role_policy(
            statement=_iam.PolicyStatement(
                effect=_iam.Effect.ALLOW,
                actions=['appsync:GraphQL'],
                resources=[
                    f"{appsync_api.arn}/types/Mutation/fields/createCallReport"
                ]))

        core.CfnOutput(self,
                       id="OutputCallSqsQueue",
                       value=call_sqs_queue.queue_arn)
        core.CfnOutput(self,
                       id="OutputCallJobCompletionSNSTopic",
                       value=call_job_complete_sns_topic.topic_arn)
        core.CfnOutput(self,
                       id="OutputExcelFileS3Bucket",
                       value=file_bucket.bucket_name)
        core.CfnOutput(self,
                       id="OutputStaticWebS3Bucket",
                       value=web_bucket.bucket_name)
        core.CfnOutput(self,
                       id="OutputStaticWebUrl",
                       value=web_bucket.bucket_website_url)

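        # Cognito identity pool that lets unauthenticated guests call the
        # AppSync API via the role defined below.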
        identity_pool = _cognito.CfnIdentityPool(
            self,
            "IdentityPool",
            allow_unauthenticated_identities=True,
            cognito_identity_providers=[
                _cognito.CfnIdentityPool.CognitoIdentityProviderProperty(
                    provider_name=user_pool.user_pool_provider_name,
                    client_id=user_pool_client.user_pool_client_id)
            ])
        identity_pool_unauthorized_role = _iam.Role(
            self,
            'IdentityPoolUnAuthorizedRole',
            assumed_by=_iam.FederatedPrincipal(
                federated="cognito-identity.amazonaws.com",
                assume_role_action="sts:AssumeRoleWithWebIdentity",
                conditions={
                    "StringEquals": {
                        "cognito-identity.amazonaws.com:aud": identity_pool.ref
                    },
                    "ForAnyValue:StringLike": {
                        "cognito-identity.amazonaws.com:amr": "unauthenticated"
                    }
                }))
        identity_pool_unauthorized_role.add_to_policy(
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW,
                actions=["appsync:GraphQL"],
                resources=[
                    f"{appsync_api.arn}/types/*",
                    # f"{appsync_api.arn}/types/Query/fields/getLatestCallTaskRecords",
                    # f"{appsync_api.arn}/types/Query/fields/getLatestCallReportRecords",
                    # f"{appsync_api.arn}/types/Mutation/fields/createCallRecord",
                    # f"{appsync_api.arn}/types/Subscription/fields/createCallTask",
                    # f"{appsync_api.arn}/types/Subscription/fields/createCallReport"
                ]))

        _cognito.CfnIdentityPoolRoleAttachment(
            self,
            "CognitoIdentityPoolRoleAttachment",
            identity_pool_id=identity_pool.ref,
            roles={
                "unauthenticated": identity_pool_unauthorized_role.role_arn
            })

        core.CfnOutput(self, id="UserPoolId", value=user_pool.user_pool_id)
        core.CfnOutput(self,
                       id="UserPoolClientId",
                       value=user_pool_client.user_pool_client_id)
        core.CfnOutput(self, id="IdentityPoolId", value=identity_pool.ref)
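The "Callout with AWS Connect" state above uses the .waitForTaskToken service integration, so each branch of the map pauses until something calls states:SendTaskSuccess with the token carried on the SQS message. A minimal sketch of what send_task_success.lambda_handler could look like, assuming the Amazon Connect contact flow forwards the task token and call outcome as Lambda invocation parameters (the event shape is an assumption, not taken from this stack):

import json

import boto3

sfn = boto3.client("stepfunctions")


def lambda_handler(event, context):
    # Assumed shape: the contact flow forwards the task token and the call
    # outcome as invocation parameters.
    params = event["Details"]["Parameters"]
    sfn.send_task_success(
        taskToken=params["TaskToken"],
        output=json.dumps({"status": params.get("status", "completed")}),
    )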
Code example #16
    def __init__(self, scope: core.Stack, id: str, **kwargs):
        super().__init__(scope, id, **kwargs)

        self.output_bucket = aws_s3.Bucket(
            self, "BucketTwitterStreamOutput",
            bucket_name = self.stack_name,
        )

        self.bucket_url = self.output_bucket.bucket_regional_domain_name

        # Kinesis Data Firehose only exposes an L1 (CloudFormation) construct, so we create and attach the IAM role/policy ourselves
        self.iam_role = aws_iam.Role(
            self, "IAMRoleTwitterStreamKinesisFHToS3",
            role_name="KinesisFirehoseToS3-{}".format(self.stack_name),
            assumed_by=aws_iam.ServicePrincipal(service='firehose.amazonaws.com'),
        )

        # S3 bucket actions
        self.s3_iam_policy_statement = aws_iam.PolicyStatement()
        actions = ["s3:GetBucketLocation", "s3:GetObject", "s3:ListBucket", "s3:ListBucketMultipartUploads", "s3:PutObject"]
        for action in actions:
            self.s3_iam_policy_statement.add_actions(action)
        self.s3_iam_policy_statement.add_resources(self.output_bucket.bucket_arn)
        self.s3_iam_policy_statement.add_resources(self.output_bucket.bucket_arn + "/*")

        # CW error log setup
        self.s3_error_logs_group = aws_logs.LogGroup(
            self, "S3ErrorLogsGroup",
            log_group_name="{}-s3-errors".format(self.stack_name)
        )

        self.s3_error_logs_stream = aws_logs.LogStream(
            self, "S3ErrorLogsStream",
            log_group=self.s3_error_logs_group,
            log_stream_name='s3BackupRaw'
        )

        self.s3_curator_error_logs_stream = aws_logs.LogStream(
            self, "S3CuratorErrorLogsStream",
            log_group=self.s3_error_logs_group,
            log_stream_name='s3BackupCurator'
        )

        # Firehose also needs permission to write error logs to that group
        self.logs_iam_policy_statement = aws_iam.PolicyStatement()
        self.logs_iam_policy_statement.add_actions('logs:PutLogEvents')
        self.logs_iam_policy_statement.add_resources(self.s3_error_logs_group.log_group_arn)

        self.firehose = aws_kinesisfirehose.CfnDeliveryStream(
            self, "FirehoseTwitterStream",
            delivery_stream_name = "{}-raw".format(self.stack_name),
            delivery_stream_type = "DirectPut",
            s3_destination_configuration={
                'bucketArn': self.output_bucket.bucket_arn,
                'bufferingHints': {
                    'intervalInSeconds': 120,
                    'sizeInMBs': 10
                },
                'compressionFormat': 'UNCOMPRESSED',
                'roleArn': self.iam_role.role_arn,
                'cloudWatchLoggingOptions': {
                    'enabled': True,
                    'logGroupName': self.s3_error_logs_group.log_group_name,
                    'logStreamName': self.s3_error_logs_stream.log_stream_name
                },
                'prefix': 'twitter-raw/'
            },
        )

        # TODO: Scope this policy down; it currently grants broader S3 actions than this stack strictly needs
        self.iam_policy = aws_iam.Policy(
            self, "IAMPolicyTwitterStreamKinesisFHToS3",
            policy_name="KinesisFirehoseToS3-{}".format(self.stack_name),
            statements=[self.s3_iam_policy_statement, self.logs_iam_policy_statement],
        )

        self.iam_policy.attach_to_role(self.iam_role)

        # As above, the curator delivery stream is an L1 construct and is wired up manually
        self.curator_firehose = aws_kinesisfirehose.CfnDeliveryStream(
            self, "CuratorFirehoseStream",
            delivery_stream_name = "{}-curator".format(self.stack_name),
            delivery_stream_type = "DirectPut",
            s3_destination_configuration={
                'bucketArn': self.output_bucket.bucket_arn,
                'bufferingHints': {
                    'intervalInSeconds': 120,
                    'sizeInMBs': 10
                },
                'compressionFormat': 'UNCOMPRESSED',
                'roleArn': self.iam_role.role_arn,
                'cloudWatchLoggingOptions': {
                    'enabled': True,
                    'logGroupName': self.s3_error_logs_group.log_group_name,
                    'logStreamName': self.s3_curator_error_logs_stream.log_stream_name
                },
                'prefix': 'twitter-curated/'
            },
        )

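        # Bundle the Lambda source into a zip; `sh` (pip install sh) shells
        # out to the system zip command.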
        def zip_package():
            cwd = os.getcwd()
            file_name = 'curator-lambda.zip'
            zip_file = cwd + '/' + file_name

            os.chdir('src/')
            sh.zip('-r9', zip_file, '.')
            os.chdir(cwd)

            return file_name, zip_file

        _, zip_file = zip_package()

        self.twitter_stream_curator_lambda_function = aws_lambda.Function(
            self, "TwitterStreamCuratorLambdaFunction",
            function_name="{}-curator".format(self.stack_name),
            code=aws_lambda.AssetCode(zip_file),
            handler="sentiment_analysis.lambda_handler",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            tracing=aws_lambda.Tracing.ACTIVE,
            description="Triggers from S3 PUT event for twitter stream data and transorms it to clean json syntax with sentiment analysis attached",
            environment={
                "STACK_NAME": self.stack_name,
                "FIREHOSE_STREAM": self.curator_firehose.delivery_stream_name
            },
            memory_size=128,
            timeout=core.Duration.seconds(120),
            log_retention=aws_logs.RetentionDays.ONE_WEEK,
        )

        # Permission to talk to comprehend for sentiment analysis
        self.comprehend_iam_policy_statement = aws_iam.PolicyStatement()
        self.comprehend_iam_policy_statement.add_actions('comprehend:*')
        self.comprehend_iam_policy_statement.add_all_resources()
        self.twitter_stream_curator_lambda_function.add_to_role_policy(self.comprehend_iam_policy_statement)

        # Permission to put in kinesis firehose
        self.curator_firehose_iam_policy_statement = aws_iam.PolicyStatement()
        self.curator_firehose_iam_policy_statement.add_actions('firehose:Put*')
        self.curator_firehose_iam_policy_statement.add_resources(self.curator_firehose.attr_arn)
        self.twitter_stream_curator_lambda_function.add_to_role_policy(self.curator_firehose_iam_policy_statement)

        # Allow the curator Lambda to read raw objects from the output bucket
        self.output_bucket.grant_read(self.twitter_stream_curator_lambda_function)

        self.twitter_stream_curator_lambda_function.add_event_source(
            aws_lambda_event_sources.S3EventSource(
                bucket=self.output_bucket,
                events=[
                    aws_s3.EventType.OBJECT_CREATED
                ],
                filters=[
                    aws_s3.NotificationKeyFilter(
                        prefix="twitter-raw/"
                    )
                ]
            )
        )
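For reference, a sketch of the sentiment_analysis.lambda_handler this stack wires up, assuming the raw Firehose output is newline-delimited tweet JSON with an English "text" field; the handler name and the FIREHOSE_STREAM environment variable come from the stack above, everything else is illustrative:

import json
import os

import boto3

s3 = boto3.client("s3")
comprehend = boto3.client("comprehend")
firehose = boto3.client("firehose")


def lambda_handler(event, context):
    stream = os.environ["FIREHOSE_STREAM"]
    for record in event["Records"]:
        bucket = record["s3"]["bucket"]["name"]
        key = record["s3"]["object"]["key"]
        body = s3.get_object(Bucket=bucket, Key=key)["Body"].read().decode("utf-8")
        for line in filter(None, body.splitlines()):
            tweet = json.loads(line)
            # Attach a sentiment label, then forward to the curated stream
            sentiment = comprehend.detect_sentiment(
                Text=tweet.get("text", ""), LanguageCode="en")
            tweet["sentiment"] = sentiment["Sentiment"]
            firehose.put_record(
                DeliveryStreamName=stream,
                Record={"Data": (json.dumps(tweet) + "\n").encode("utf-8")})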
Code example #17
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Replace with the latest Pillow layer ARN for your region. See https://github.com/keithrozario/Klayers/tree/master/deployments/python3.8/arns
        layerpillow = lambda_.LayerVersion.from_layer_version_arn(
            self, 'pillowlayerversion', 'Your_Region_Pillow_ARN')

        # --------------------------------------------------------
        # Create buckets for thumbnail
        # --------------------------------------------------------

        bucket = s3.Bucket(self,
                           id='SourceBucket',
                           block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
                           bucket_name='<your-name>-thumbnail-upload-372')

        resized_bucket = s3.Bucket(
            self,
            id='ResizedBucket',
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
            bucket_name='<your-name>-thumbnail-upload-372-resized')

        # -------------------------------------------------------------------------------------
        # Create IAM role for lambda invocation, write to X-Ray and Get and Put objects into s3
        # -------------------------------------------------------------------------------------
        lambda_role = iam.Role(
            self,
            "Lambda-role",
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonS3ReadOnlyAccess"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AWSXrayWriteOnlyAccess")
            ])

        lambda_role.add_to_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                resources=[bucket.bucket_arn + "/*"],
                                actions=["s3:GetObject"]))

        lambda_role.add_to_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                resources=[resized_bucket.bucket_arn + "/*"],
                                actions=["s3:PutObject"]))

        # ------------------------------------------
        # Create a lambda layer for botos3 and X-Ray
        # ------------------------------------------
        layerxray = lambda_.LayerVersion(
            self,
            'xray_layer',
            code=lambda_.Code.from_asset(
                os.path.join(os.getcwd(),
                             "lambda_layer_x_ray_stack/layers/xray")),
            description="Lambda Layer containing Xray SDK Python Library",
            compatible_runtimes=[
                lambda_.Runtime.PYTHON_3_7,
                lambda_.Runtime.PYTHON_3_8,
            ],
            removal_policy=RemovalPolicy.DESTROY,
        )

        # -------------------------------------------
        # Create Lambda Function
        # -------------------------------------------
        xray_trace_lambda = lambda_.Function(
            self,
            id='xray-sample-app',
            function_name='xray-handler',
            code=lambda_.Code.from_asset(
                os.path.join(os.getcwd(), "lambda_code")),
            runtime=lambda_.Runtime.PYTHON_3_8,
            handler="lambda-handler.lambda_handler",
            layers=[layerxray, layerpillow],
            role=lambda_role,
            tracing=lambda_.Tracing.ACTIVE)

        xray_trace_lambda.add_event_source(
            eventsources.S3EventSource(
                bucket,
                events=[s3.EventType.OBJECT_CREATED_PUT],
            ))
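A minimal lambda-handler.py that would satisfy this wiring, with Pillow supplied by the layer; the destination bucket name is assumed to be the source bucket name plus a "-resized" suffix, matching the two bucket names above (X-Ray instrumentation omitted for brevity):

import os
import uuid
from urllib.parse import unquote_plus

import boto3
from PIL import Image

s3 = boto3.client("s3")


def resize_image(src_path, dst_path):
    # Shrink the image to at most 128x128, preserving aspect ratio
    with Image.open(src_path) as image:
        image.thumbnail((128, 128))
        image.save(dst_path)


def lambda_handler(event, context):
    for record in event["Records"]:
        bucket = record["s3"]["bucket"]["name"]
        key = unquote_plus(record["s3"]["object"]["key"])
        download_path = f"/tmp/{uuid.uuid4()}-{os.path.basename(key)}"
        upload_path = f"/tmp/resized-{os.path.basename(key)}"
        s3.download_file(bucket, key, download_path)
        resize_image(download_path, upload_path)
        # Assumption: the resized bucket mirrors the source name + "-resized"
        s3.upload_file(upload_path, f"{bucket}-resized", key)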
Code example #18
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        """
        Initialize the QuickSight governance stack.
        """
        super().__init__(scope, construct_id, **kwargs)

        # -------------------------------
        # S3 Bucket for Manifests
        # -------------------------------

        qs_gov_bucket = s3.Bucket(
            self,
            id=f"{cf.PROJECT}-Bucket",
        )
        bucket_name = qs_gov_bucket.bucket_name

        # -------------------------------
        # IAM
        # -------------------------------

        list_roles_policy = iam.ManagedPolicy(
            self,
            id=f"{cf.PROJECT}-ListRolesPolicy",
            managed_policy_name=f"{cf.PROJECT}-ListRolesPolicy",
            description=None,
            path="/",
            statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                    actions=["iam:ListRoles", "iam:ListAccountAliases"],
                )
            ],
        )

        federated_quicksight_policy = iam.ManagedPolicy(
            self,
            id=f"{cf.PROJECT}-FederatedQuickSightPolicy",
            managed_policy_name=f"{cf.PROJECT}-FederatedQuickSightPolicy",
            statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    resources=[
                        f"arn:aws:iam::{cf.ACCOUNT}:saml-provider/{cf.OKTA_IDP_NAME}"
                    ],
                    actions=["sts:AssumeRoleWithSAML"],
                    conditions={
                        "StringEquals": {
                            "saml:aud": "https://signin.aws.amazon.com/saml"
                        }
                    },
                )
            ],
        )

        okta_federated_principal = iam.FederatedPrincipal(
            federated=
            f"arn:aws:iam::{cf.ACCOUNT}:saml-provider/{cf.OKTA_IDP_NAME}",
            assume_role_action="sts:AssumeRoleWithSAML",
            conditions={
                "StringEquals": {
                    "SAML:aud": "https://signin.aws.amazon.com/saml"
                }
            },
        )

        federated_quicksight_role = iam.Role(
            self,
            id=f"{cf.PROJECT}-{cf.QS_FEDERATED_ROLE_NAME}",
            role_name=f"{cf.QS_FEDERATED_ROLE_NAME}",
            assumed_by=okta_federated_principal,
            description=
            "Allow Okta to Federate Login & User Creation to QuickSight",
            managed_policies=[federated_quicksight_policy],
        )

        iam.User(
            self,
            id=f"{cf.PROJECT}-OktaSSOUser",
            user_name=f"{cf.PROJECT}-OktaSSOUser",
            managed_policies=[list_roles_policy],
        )

        # -------------------------------
        # Lambda Layers
        # -------------------------------

        path_src = os.path.join(cf.PATH_SRC, "")

        sp.call(["make", "bundle"], cwd=path_src)

        requests_layer = _lambda.LayerVersion(
            self,
            f"{cf.PROJECT}-requests-layer",
            code=_lambda.Code.from_asset(os.path.join(path_src,
                                                      "requests.zip")))

        sp.call(["make", "clean"], cwd=path_src)

        # -------------------------------
        # Lambda Functions
        # -------------------------------

        # IAM role shared by the Lambda functions

        qs_governance_policy = iam.ManagedPolicy(
            self,
            id=f"{cf.PROJECT}-LambdaPolicy",
            managed_policy_name=f"{cf.PROJECT}-LambdaPolicy",
            statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                    actions=["ses:SendEmail", "ses:SendRawEmail"],
                ),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    resources=[
                        f"arn:aws:secretsmanager:{cf.REGION}:{cf.ACCOUNT}:secret:{cf.OKTA_SECRET_ID}*",
                        f"arn:aws:secretsmanager:{cf.REGION}:{cf.ACCOUNT}:secret:{cf.SLACK_SECRET_ID}*"
                    ],
                    actions=[
                        "secretsmanager:GetSecretValue",
                    ],
                ),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    resources=[
                        f"arn:aws:iam::{cf.ACCOUNT}:policy/{cf.QS_PREFIX}*"
                    ],
                    actions=[
                        "iam:CreatePolicy",
                        "iam:GetPolicy",
                        "iam:DeletePolicy",
                    ],
                ),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                    actions=["iam:ListPolicies"],
                ),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                    actions=[
                        "glue:GetDatabase",
                        "glue:GetDatabases",
                        "glue:GetTable",
                        "glue:GetPartitions",
                        "glue:GetPartition",
                        "glue:GetTables",
                    ],
                ),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                    actions=["quicksight:*", "ds:*"],
                ),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    resources=[
                        f"arn:aws:s3:::{bucket_name}/*",
                        f"arn:aws:s3:::{bucket_name}*"
                    ],
                    actions=["s3:*"],
                ),
            ],
        )

        qs_governance_role = iam.Role(
            self,
            id=f"{cf.PROJECT}-QuickSightPermissionMappingRole",
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AWSLambdaBasicExecutionRole"),
                qs_governance_policy,
            ],
        )

        # Lambdas

        get_okta_information_lambda = _lambda.Function(
            self,
            id=f"{cf.PROJECT}-GetOktaInfo",
            function_name=f"{cf.PROJECT}-GetOktaInformation",
            handler="get_okta_information.handler",
            role=qs_governance_role,
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.asset(os.path.join(cf.PATH_SRC, "pkg")),
            environment={
                "OKTA_SECRET_ID": cf.OKTA_SECRET_ID,
                "QS_PREFIX": cf.QS_PREFIX,
                "QS_GOVERNANCE_BUCKET": bucket_name,
                "QS_FEDERATED_ROLE_NAME": f"{cf.QS_FEDERATED_ROLE_NAME}",
                "S3_PREFIX_USERS": cf.S3_PREFIX_USERS,
                "SLACK_SECRET_ID": cf.SLACK_SECRET_ID
            },
            memory_size=256,
            timeout=core.Duration.seconds(180),
            layers=[requests_layer],
        )

        # Lambda Okta-to-QuickSight mappers

        qs_user_governance_lambda = _lambda.Function(
            self,
            id=f"{cf.PROJECT}-QSUserGovernance",
            function_name=f"{cf.PROJECT}-QSUserGovernance",
            handler="qs_user_governance.handler",
            role=qs_governance_role,
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.asset(os.path.join(cf.PATH_SRC, "pkg")),
            environment={
                "QS_FEDERATED_ROLE_NAME": f"{cf.QS_FEDERATED_ROLE_NAME}",
                "QS_PREFIX": cf.QS_PREFIX,
                "QS_ADMIN_OKTA_GROUP": cf.QS_ADMIN_OKTA_GROUP,
                "QS_AUTHOR_OKTA_GROUP": cf.QS_AUTHOR_OKTA_GROUP,
                "QS_READER_OKTA_GROUP": cf.QS_READER_OKTA_GROUP,
                "SLACK_SECRET_ID": cf.SLACK_SECRET_ID
            },
            memory_size=256,
            timeout=core.Duration.seconds(180),
            layers=[requests_layer],
        )

        qs_asset_governance_lambda = _lambda.Function(
            self,
            id=f"{cf.PROJECT}-QSAssetGovernance",
            function_name=f"{cf.PROJECT}-QSAssetGovernance",
            handler="qs_asset_governance.handler",
            role=qs_governance_role,
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.asset(os.path.join(cf.PATH_SRC, "pkg")),
            memory_size=256,
            timeout=core.Duration.seconds(180),
            layers=[requests_layer],
            environment={
                "QS_GOVERNANCE_BUCKET": f"{bucket_name}",
                "S3_PREFIX_USERS": cf.S3_PREFIX_USERS,
                "S3_PREFIX_POLICIES": cf.S3_PREFIX_POLICIES,
                "STAGE": cf.DEPLOYMENT_STAGE,
                "SLACK_SECRET_ID": cf.SLACK_SECRET_ID,
                "VERIFIED_EMAIL": cf.VERIFIED_EMAIL
            })

        qs_policy_governance_lambda = _lambda.Function(
            self,
            id=f"{cf.PROJECT}-QSPolicyGovernance",
            function_name=f"{cf.PROJECT}-QSPolicyGovernance",
            handler="qs_policy_governance.handler",
            role=qs_governance_role,
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.asset(os.path.join(cf.PATH_SRC, "pkg")),
            environment={
                "QS_ADMIN_OKTA_GROUP": cf.QS_ADMIN_OKTA_GROUP,
                "QS_AUTHOR_OKTA_GROUP": cf.QS_AUTHOR_OKTA_GROUP,
                "QS_AUTHOR_BASE_POLICY": cf.QS_AUTHOR_BASE_POLICY,
                "QS_PREFIX": cf.QS_PREFIX,
                "QS_GOVERNANCE_BUCKET": f"{bucket_name}",
                "S3_PREFIX_POLICIES": cf.S3_PREFIX_POLICIES,
                "QS_SUPERUSER": cf.QS_SUPERUSER,
                "DEPLOYMENT_STAGE": cf.DEPLOYMENT_STAGE,
                "SLACK_SECRET_ID": cf.SLACK_SECRET_ID
            },
            memory_size=256,
            timeout=core.Duration.seconds(180),
            layers=[requests_layer])

        qs_resource_cleanup_lambda = _lambda.Function(
            self,
            id=f"{cf.PROJECT}-QSResourceCleanup",
            function_name=f"{cf.PROJECT}-QSResourceCleanup",
            handler="qs_resource_cleanup.handler",
            role=qs_governance_role,
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.asset(os.path.join(cf.PATH_SRC, "pkg")),
            memory_size=256,
            timeout=core.Duration.seconds(180),
            layers=[requests_layer],
            environment={
                "QS_PREFIX": cf.QS_PREFIX,
                "QS_SUPERUSER": cf.QS_SUPERUSER,
                "QS_GOVERNANCE_BUCKET": f"{bucket_name}",
                "S3_PREFIX_ASSETS": cf.S3_PREFIX_ASSETS,
                "S3_PREFIX_USERS": cf.S3_PREFIX_USERS,
                "S3_PREFIX_POLICIES": cf.S3_PREFIX_POLICIES,
                "SLACK_SECRET_ID": cf.SLACK_SECRET_ID
            })

        # -------------------------------
        # S3 Event Triggers
        # -------------------------------

        qs_user_governance_lambda.add_event_source(
            lambda_event_sources.S3EventSource(
                bucket=qs_gov_bucket,
                events=[s3.EventType.OBJECT_CREATED],
                filters=[s3.NotificationKeyFilter(prefix=cf.S3_PREFIX_USERS)],
            ))

        qs_policy_governance_lambda.add_event_source(
            lambda_event_sources.S3EventSource(
                bucket=qs_gov_bucket,
                events=[s3.EventType.OBJECT_CREATED],
                filters=[
                    s3.NotificationKeyFilter(prefix=cf.S3_PREFIX_POLICIES)
                ],
            ))

        # -------------------------------
        # CloudWatch Event Rules (30 minutes)
        # -------------------------------

        lambda_schedule = events.Schedule.rate(core.Duration.minutes(30))
        get_okta_info_target = events_targets.LambdaFunction(
            handler=get_okta_information_lambda)
        events.Rule(
            self,
            id=f"{cf.PROJECT}-GetOktaInfoScheduledEvent",
            description="The scheduled CloudWatch event trigger for the Lambda",
            enabled=True,
            schedule=lambda_schedule,
            targets=[get_okta_info_target],
        )

        qs_asset_gov_target = events_targets.LambdaFunction(
            handler=qs_asset_governance_lambda)
        events.Rule(
            self,
            id=f"{cf.PROJECT}-QSAssetGovScheduledEvent",
            description="The scheduled CloudWatch event trigger for the Lambda",
            enabled=True,
            schedule=lambda_schedule,
            targets=[qs_asset_gov_target],
        )

        qs_cleanup_target = events_targets.LambdaFunction(
            handler=qs_resource_cleanup_lambda)
        events.Rule(
            self,
            id=f"{cf.PROJECT}-QSCleanupScheduledEvent",
            description="The scheduled CloudWatch event trigger for the Lambda",
            enabled=True,
            schedule=lambda_schedule,
            targets=[qs_cleanup_target],
        )

        # -------------------------------
        # Athena WorkGroup
        # -------------------------------

        workgroup_encryption = athena.CfnWorkGroup.EncryptionConfigurationProperty(
            encryption_option='SSE_S3')

        workgroup_output = athena.CfnWorkGroup.ResultConfigurationProperty(
            output_location="s3://" + cf.ATHENA_OUTPUT_LOC,
            encryption_configuration=workgroup_encryption)

        quicksight_athena_workgroup = athena.CfnWorkGroup(
            self,
            id=f"{cf.PROJECT}-workgroup",
            name=f"{cf.QS_PREFIX}-workgroup",
            description="workgroup for QuickSight Data Source operations",
            recursive_delete_option=True,
            work_group_configuration=athena.CfnWorkGroup.
            WorkGroupConfigurationProperty(
                result_configuration=workgroup_output))
        workgroup_name = quicksight_athena_workgroup.name

        # -------------------------------
        # S3 Object Deployments
        # -------------------------------

        policy_manifest_deploy = s3_deploy.BucketDeployment(
            self,
            id=f"{cf.PROJECT}-PolicyManifestDeploy",
            sources=[
                s3_deploy.Source.asset(
                    os.path.join(cf.PATH_ROOT, "qs_config/policies"))
            ],
            destination_bucket=qs_gov_bucket,
            destination_key_prefix=cf.S3_PREFIX_POLICIES)

        # -------------------------------
        # QuickSight IAM Policy Assignments
        # -------------------------------

        qs_author_base_policy = iam.ManagedPolicy(
            self,
            id=f"{cf.PROJECT}-{cf.QS_AUTHOR_BASE_POLICY}",
            managed_policy_name=f"{cf.QS_AUTHOR_BASE_POLICY}",
            statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    resources=[
                        f"arn:aws:athena:{cf.REGION}:{cf.ACCOUNT}:workgroup/{workgroup_name}"
                    ],
                    actions=[
                        "athena:GetNamedQuery", "athena:CancelQueryExecution",
                        "athena:CreateNamedQuery", "athena:DeleteNamedQuery",
                        "athena:StartQueryExecution",
                        "athena:StopQueryExecution", "athena:GetWorkGroup",
                        "athena:GetQueryResults",
                        "athena:GetQueryExecution",
                        "athena:BatchGetQueryExecution",
                        "athena:BatchGetNamedQuery", "athena:ListNamedQueries",
                        "athena:ListQueryExecutions",
                        "athena:GetQueryResultsStream"
                    ]),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                    actions=[
                        "athena:ListWorkGroups", "athena:ListDataCatalogs",
                        "athena:StartQueryExecution",
                        "athena:GetQueryExecution",
                        "athena:GetQueryResultsStream",
                        "athena:ListTableMetadata", "athena:GetTableMetadata"
                    ]),
                iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                    resources=[
                                        "arn:aws:s3:::ast-datalake-*",
                                        "arn:aws:s3:::aws-athena-query-*",
                                        f"arn:aws:s3:::{bucket_name}*",
                                        f"arn:aws:s3:::{cf.ATHENA_OUTPUT_LOC}*"
                                    ],
                                    actions=[
                                        "s3:List*",
                                        "s3:Get*",
                                    ]),
                iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                    resources=["*"],
                                    actions=["s3:ListAllMyBuckets"]),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    resources=[
                        "arn:aws:s3:::aws-athena-query-*",
                        f"arn:aws:s3:::{bucket_name}/{cf.QS_PREFIX}/athena-results*"
                    ],
                    actions=["s3:Put*"]),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    resources=[
                        f"arn:aws:athena:{cf.REGION}:{cf.ACCOUNT}:datacatalog/AwsDataCatalog"
                    ],
                    actions=["athena:ListDatabases"]),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                    actions=["kms:Decrypt", "kms:GenerateDataKey"]),
            ],
        )