def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        ipam_endpoint = self.node.try_get_context("apiEndpoint") + "vpc"

        custom_resource_handler = PythonFunction(self, "CreateLambda",
            entry="./src_custom_resource",
            runtime=aws_lambda.Runtime.PYTHON_3_8
            )
        custom_resource_handler.add_environment("VENDING_MACHINE_API", ipam_endpoint)

        # By default the Lambda execution role doesn't allow cross-account API invocation
        custom_resource_handler.role.add_to_policy(aws_iam.PolicyStatement(
            resources=["*"],
            actions=["execute-api:Invoke"]
            ))

        # First custom resource: get a free CIDR from the Vending Machine
        cr_create = core.CustomResource(self, "Resource1", 
            resource_type="Custom::GetSubnet", 
            service_token=custom_resource_handler.function_arn
            )
        # Then provision a new VPC with private subnets in the given CIDR range.
        # NOTE: we are using the L1 construct for the VPC; the L2 construct didn't work with custom resources.
        cidr = cr_create.get_att("vpcCidrBlock").to_string()
        subnet_cidrs = [
            cr_create.get_att(f"subnet{i}CidrBlock").to_string() for i in range(4)
        ]

        vpc = aws_ec2.CfnVPC(self, "VPC", cidr_block=cidr)

        for i, subnet_cidr in enumerate(subnet_cidrs):
            aws_ec2.CfnSubnet(self, f"Private{i}",
                vpc_id=vpc.ref,
                cidr_block=subnet_cidr)


        # Lastly, report the new VPC ID back to the Vending Machine
        core.CustomResource(self, "Resource2",
            resource_type="Custom::PutVpcId",
            properties={
                "vpcId": vpc.ref,
                "cidrBlock": cidr
            },
            service_token=custom_resource_handler.function_arn
            )

        core.CfnOutput(self, "cidrBlock", value=cidr)
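The handler code bundled from ./src_custom_resource is not shown. A minimal sketch of what it could look like, assuming the vending-machine API speaks plain JSON and returns the vpcCidrBlock/subnetNCidrBlock keys read above; SigV4 request signing (implied by the execute-api:Invoke grant) is omitted for brevity:

# Hypothetical ./src_custom_resource/index.py; the real vending-machine API
# contract is not shown in the stack, so treat every field here as illustrative.
import json
import os
import urllib3

http = urllib3.PoolManager()

def handler(event, context):
    api = os.environ["VENDING_MACHINE_API"]
    status, data = "SUCCESS", {}
    try:
        if event["ResourceType"] == "Custom::GetSubnet" and event["RequestType"] == "Create":
            # Ask the vending machine for a free CIDR; the response is assumed
            # to contain the vpcCidrBlock and subnet0..3CidrBlock keys.
            data = json.loads(http.request("GET", api).data)
        elif event["ResourceType"] == "Custom::PutVpcId" and event["RequestType"] in ("Create", "Update"):
            # Report the provisioned vpcId/cidrBlock back to the vending machine.
            http.request("PUT", api, body=json.dumps(event["ResourceProperties"]))
        # Deletes fall through and simply report success.
    except Exception:
        status = "FAILED"
    # Standard CloudFormation custom resource reply to the pre-signed callback URL.
    http.request("PUT", event["ResponseURL"], body=json.dumps({
        "Status": status,
        "Reason": "see CloudWatch logs",
        "PhysicalResourceId": event.get("PhysicalResourceId", context.log_stream_name),
        "StackId": event["StackId"],
        "RequestId": event["RequestId"],
        "LogicalResourceId": event["LogicalResourceId"],
        "Data": data,
    }))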
Example 2
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # The code that defines your stack goes here

        # A plain Function would suffice if the handler had no extra dependencies:
        # my_function = _lambda.Function(self,
        #     "my-function",
        #     runtime=_lambda.Runtime.PYTHON_3_8,
        #     code=_lambda.Code.from_asset("src"),
        #     handler="main.handler"
        # )

        my_function = PythonFunction(
            self,
            "my-function",
            entry="src",
            index="main.py",
            handler="handler",
            runtime=_lambda.Runtime.PYTHON_3_8,
        )

        rule = events.Rule(self,
                           "Rule",
                           schedule=events.Schedule.cron(minute="0",
                                                         hour="18",
                                                         month="*",
                                                         week_day="MON-FRI",
                                                         year="*"))

        rule.add_target(targets.LambdaFunction(my_function))
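entry="src", index="main.py", handler="handler" pins the asset layout; a placeholder matching it (the body is hypothetical, only the names are fixed by the props above):

# src/main.py
def handler(event, context):
    # Invoked at 18:00 UTC on weekdays by the cron rule above.
    print("scheduled event:", event)
    return {"ok": True}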
Example 3
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        table_name = "posts2"
        function_name = "cl2"
        email = "*****@*****.**"

        table = Table(
            self,
            "cl_posts",
            table_name=table_name,
            partition_key=Attribute(name="url", type=AttributeType.STRING),
            time_to_live_attribute="ttl",
        )

        function = PythonFunction(
            self,
            "cl_function",
            function_name=function_name,
            entry="src",
            index="app.py",
            runtime=Runtime.PYTHON_3_8,
            environment={
                "cl_email": email,
                "cl_table_name": table_name
            },
            timeout=Duration.seconds(300),
            initial_policy=[
                PolicyStatement(
                    actions=["ses:SendEmail", "ses:VerifyEmailIdentity"],
                    resources=[
                        f"arn:aws:ses:{self.region}:{self.account}:identity/{email}"
                    ],
                ),
                PolicyStatement(
                    actions=[
                        "dynamodb:BatchGetItem", "dynamodb:BatchWriteItem"
                    ],
                    resources=[table.table_arn],
                ),
            ],
        )

        with open("events/event.json") as f:
            event = json.load(f)

        Rule(
            self,
            "cl_schedule",
            schedule=Schedule.expression("cron(0 19 * * ? *)"),
            targets=[
                LambdaFunction(function,
                               event=RuleTargetInput.from_object(event))
            ],
        )
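The initial_policy and environment tie together: the handler can only e-mail the configured identity and batch-read/write the posts table. A sketch of how src/app.py might use them (illustrative, not the original code):

# src/app.py (hypothetical body)
import os
import boto3

def handler(event, context):
    email = os.environ["cl_email"]             # SES identity granted above
    table_name = os.environ["cl_table_name"]   # table granted BatchGet/BatchWrite
    boto3.client("ses").send_email(
        Source=email,
        Destination={"ToAddresses": [email]},
        Message={
            "Subject": {"Data": "posts digest"},
            "Body": {"Text": {"Data": f"table: {table_name}"}},
        },
    )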
Example 4
    def __init__(self, scope: cdk.Construct, construct_id: str, context,
                 resource: apigw.Resource, user_pool: cognito.UserPool,
                 user_client: cognito.UserPoolClient,
                 user_pool_parameter: ssm.StringParameter,
                 launcher_network_config_parameter: ssm.StringParameter,
                 launcher_task_definition: GrantingTaskDefinition,
                 launcher_cluster: ecs.Cluster, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        server_domain = self.node.try_get_context('server_domain')

        servers_table = db.Table.from_table_name(self, 'servers-dynamo',
                                                 context['servers_table_name'])
        servers_key = kms.Key.from_key_arn(
            self, 'ServersKey',
            f'arn:aws:kms:{context["region"]}:{context["account_id"]}:key/{context["kms_key_id"]}'
        )

        class ServerLambda(PythonFunction):
            def __init__(self, scope, construct_id, handler: str):
                super().__init__(
                    scope,
                    construct_id,
                    entry='servers_lambda',
                    index='views.py',
                    handler=handler,
                    runtime=_lambda.Runtime.PYTHON_3_8,
                    timeout=cdk.Duration.seconds(30),
                    environment={
                        'SERVER_DOMAIN':
                        server_domain,
                        'DYNAMODB_SERVERS_TABLE_NAME':
                        servers_table.table_name,
                        'LAUNCHER_NETWORK_CONFIG_PARAMETER':
                        launcher_network_config_parameter.parameter_name,
                        'LAUNCHER_TASK_ARN':
                        launcher_task_definition.task_definition_arn,
                        'CLUSTER_ARN':
                        launcher_cluster.cluster_arn
                    })
                servers_table.grant_read_write_data(self)
                servers_key.grant_encrypt_decrypt(self)
                launcher_network_config_parameter.grant_read(self)
                launcher_task_definition.grant_run(self)

        servers_lambda = ServerLambda(self, 'ServersLambda', handler='servers')
        server_lambda = ServerLambda(self, 'ServerLambda', handler='server')

        user_pool_parameter_lambda = PythonFunction(
            self,
            'UserPoolParameterLambda',
            entry='parameter_lambda',
            index='main.py',
            handler='main',
            runtime=_lambda.Runtime.PYTHON_3_8,
            timeout=cdk.Duration.seconds(30),
            environment={
                'USER_POOL_PARAMETER': user_pool_parameter.parameter_name
            })
        user_pool_parameter.grant_read(user_pool_parameter_lambda)

        user_pool_authorizer = apigw.CognitoUserPoolsAuthorizer(
            self, 'UserPoolAuthorizer', cognito_user_pools=[user_pool])

        # /api
        api = resource.add_resource('api')

        # /api/user_pool
        parameter = api.add_resource(
            'user_pool',
            default_integration=apigw.LambdaIntegration(
                user_pool_parameter_lambda),
            default_method_options=apigw.MethodOptions(
                authorization_type=apigw.AuthorizationType.NONE))
        parameter.add_method('GET')

        # /api/servers
        servers = api.add_resource(
            'servers',
            default_integration=apigw.LambdaIntegration(servers_lambda),
            default_method_options=apigw.MethodOptions(
                authorizer=user_pool_authorizer))
        servers.add_method('GET')
        servers.add_method('PUT')

        # /api/servers/{hostname}
        server = servers.add_resource(
            '{hostname}',
            default_integration=apigw.LambdaIntegration(server_lambda),
            default_method_options=apigw.MethodOptions(
                authorizer=user_pool_authorizer))
        server.add_method('GET')
        server.add_method('PUT')
        server.add_method('POST')
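Both ServerLambda instances share one bundle (entry='servers_lambda', index='views.py') and differ only in the handler name, so views.py must export two entry points; a skeleton (bodies hypothetical):

# servers_lambda/views.py skeleton; only the function names are implied by
# ServerLambda(..., handler='servers') and ServerLambda(..., handler='server')
def servers(event, context):
    # Backs GET and PUT on /api/servers (list / create).
    ...

def server(event, context):
    # Backs GET, PUT and POST on /api/servers/{hostname}.
    hostname = event["pathParameters"]["hostname"]
    ...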
Example 5
    def __init__(self, scope: cdk.Construct, construct_id: str, config, vpc: IVpc, instance: IInstance, neo4j_user_secret: ISecret, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        bucket = Bucket(self, "s3-bucket-altimeter",
            bucket_name=config["s3_bucket"],
            # Disable encryption: it's not strictly required here and it conflicts with
            # SCP guardrails set by Control Tower on the Audit account.
            encryption=BucketEncryption.UNENCRYPTED,
            block_public_access=BlockPublicAccess.BLOCK_ALL
        )

        cluster = Cluster(self, "ecs-cluster-altimeter",
            cluster_name="ecsclstr-altimeter--default",
            vpc=vpc
        )

        task_role = Role(self, "iam-role-altimeter-task-role",
            assumed_by=ServicePrincipal("ecs-tasks.amazonaws.com"),
            # It appears that, within the account where the scanner runs, the task role is
            # (partially) used for scanning resources, rather than the altimeter-scanner-access role.
            managed_policies=[
                ManagedPolicy.from_aws_managed_policy_name('SecurityAudit'),
                ManagedPolicy.from_aws_managed_policy_name('job-function/ViewOnlyAccess')
            ]
        )

        task_definition = FargateTaskDefinition(self, "ecs-fgtd-altimeter",
            task_role=task_role,
            memory_limit_mib=self.MEMORY_LIMIT,
            cpu=self.CPU
        )

        docker_path = os.path.join(os.path.curdir, "..")

        image_asset = DockerImageAsset(self, 'ecr-assets-dia-altimeter',
            directory=docker_path,
            file="scanner.Dockerfile"
        )

        task_definition.add_container("ecs-container-altimeter",
            image=ContainerImage.from_docker_image_asset(image_asset),
            # memory_limit_mib and cpu are set at the task-definition level above
            environment={
                "CONFIG_PATH": config["altimeter_config_path"],
                "S3_BUCKET": config["s3_bucket"]
            },
            logging=AwsLogDriver(
                stream_prefix='altimeter',
                log_retention=RetentionDays.TWO_WEEKS
            )
        )

        task_definition.add_to_task_role_policy(PolicyStatement(
            resources=["arn:aws:iam::*:role/"+config["account_execution_role"]],
            actions=['sts:AssumeRole']
        ))

        task_definition.add_to_task_role_policy(PolicyStatement(
            resources=[
                "arn:aws:s3:::"+config["s3_bucket"],
                "arn:aws:s3:::"+config["s3_bucket"]+"/*"
            ],
            actions=["s3:GetObject*",
                "s3:GetBucket*",
                "s3:List*",
                "s3:DeleteObject*",
                "s3:PutObject",
                "s3:Abort*",
                "s3:PutObjectTagging"]
        ))

        # Grant the ability to write stdout to CloudWatch Logs
        # TODO: Refine
        task_definition.add_to_task_role_policy(PolicyStatement(
            resources=["*"],
            actions=['logs:*']
        ))

        # Trigger task every 24 hours
        Rule(self, "events-rule-altimeter-daily-scan",
            rule_name="evrule--altimeter-daily-scan",
            schedule=Schedule.cron(hour="0", minute="0"),
            description="Daily altimeter scan",
            targets=[EcsTask(
                task_definition=task_definition,
                cluster=cluster,
                subnet_selection=SubnetSelection(subnet_type=SubnetType.PRIVATE)
            )]
        )

        # Trigger task manually via event
        Rule(self, "events-rule-altimeter-manual-scan",
            rule_name="evrule--altimeter-manual-scan",
            event_pattern=EventPattern(source=['altimeter']), 
            description="Manual altimeter scan",
            targets=[EcsTask(
                task_definition=task_definition,
                cluster=cluster,
                subnet_selection=SubnetSelection(subnet_type=SubnetType.PRIVATE)
            )]
        )        
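The manual rule matches on the event source alone, so a scan can be kicked off from any session with events:PutEvents, for instance (the DetailType value is arbitrary here):

import boto3

# Emit an event matching EventPattern(source=['altimeter']) to start a scan.
boto3.client("events").put_events(Entries=[{
    "Source": "altimeter",
    "DetailType": "manual-scan",  # not filtered on; any value works
    "Detail": "{}",
}])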


        # Don't put the Neo4j importer Lambda in a separate stack: it would create a
        # circular reference with the S3 event source, and an imported bucket can't be
        # used as an event source (you need a Bucket, not an IBucket).
        neo4j_importer_function = PythonFunction(self, 'lambda-function-neo4j-importer',
            function_name="function-altimeter--neo4j-importer",             
            entry="../neo4j-importer",
            index="app.py",
            handler="lambda_handler",
            runtime=Runtime.PYTHON_3_8,
            memory_size=256,
            timeout=cdk.Duration.seconds(60),
            vpc=vpc,
            vpc_subnets=SubnetSelection(subnets=vpc.select_subnets(subnet_group_name='Private').subnets),
            environment={
                "neo4j_address": instance.instance_private_ip,
                "neo4j_user_secret_name": neo4j_user_secret.secret_name
            }
        )

        neo4j_importer_function.add_event_source(
            S3EventSource(bucket,
                events=[EventType.OBJECT_CREATED, EventType.OBJECT_REMOVED],
                filters=[{"prefix": "raw/", "suffix": ".rdf"}]
            )
        )

        # Grant the Lambda read/write access to the S3 bucket, for reading raw RDF,
        # writing prepared RDF and generating signed URIs
        bucket.grant_read_write(neo4j_importer_function.role)
        # Grant the Lambda read access to the Neo4j user secret
        neo4j_user_secret.grant_read(neo4j_importer_function.role)
Example 6
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        submit_lambda = PythonFunction(self,
                                       'Submit',
                                       handler='handler',
                                       index='submit.py',
                                       entry=os.path.join(
                                           os.getcwd(), 'lambdas'),
                                       runtime=lambda_.Runtime.PYTHON_3_8)

        get_status_lambda = PythonFunction(self,
                                           'Status',
                                           handler='handler',
                                           index='status.py',
                                           entry=os.path.join(
                                               os.getcwd(), 'lambdas'),
                                           runtime=lambda_.Runtime.PYTHON_3_8)

        submit_job = tasks.LambdaInvoke(
            self,
            "Submit Job",
            lambda_function=submit_lambda,
            # Lambda's result is in the attribute `Payload`
            output_path="$.Payload")

        wait_x = sfn.Wait(self,
                          "Wait X Seconds",
                          time=sfn.WaitTime.seconds_path("$.waitSeconds"))

        get_status = tasks.LambdaInvoke(
            self,
            "Get Job Status",
            lambda_function=get_status_lambda,
            # The Lambda's result is in the attribute `Payload`
            output_path="$.Payload")

        job_failed = sfn.Fail(self,
                              "Job Failed",
                              cause="AWS Batch Job Failed",
                              error="DescribeJob returned FAILED")

        final_status = tasks.LambdaInvoke(
            self,
            "Get Final Job Status",
            lambda_function=get_status_lambda,
            # Unwrap the Lambda result here as well
            output_path="$.Payload")

        definition = submit_job.next(wait_x).next(get_status).next(
            sfn.Choice(self, "Job Complete?").when(
                sfn.Condition.string_equals("$.status", "FAILED"),
                job_failed).when(
                    sfn.Condition.string_equals("$.status", "SUCCEEDED"),
                    final_status).otherwise(wait_x))

        sfn.StateMachine(self,
                         "StateMachine",
                         definition=definition,
                         timeout=cdk.Duration.minutes(5))
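The state machine only constrains the Lambda outputs: the submit handler must emit waitSeconds (read by the Wait state) and every status payload must carry a status field for the Choice, with waitSeconds passed through for the retry loop. Minimal stand-ins (bodies hypothetical):

# lambdas/submit.py: start the job and tell the Wait state how long to pause
def handler(event, context):
    return {"guid": "job-123", "waitSeconds": 10, "status": "RUNNING"}

# lambdas/status.py: re-check the job; keep waitSeconds so "otherwise" can loop
def handler(event, context):
    return {**event, "status": "SUCCEEDED"}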
Example 8
class CdkStack(core.Stack):
    def create_enumerate_statemachine(self):
        enumerate_job = tasks.LambdaInvoke(
            self,
            "Enumerate Notes Job",
            lambda_function=self.step_lambda,
            payload=sfn.TaskInput.from_object({"action": "enumerate_notes"}),
        )
        get_tf_job = tasks.LambdaInvoke(self,
                                        "Get Text Frequency Job",
                                        lambda_function=self.step_lambda,
                                        payload=sfn.TaskInput.from_object({
                                            "action":
                                            "update_tf",
                                            "id.$":
                                            "$.id",
                                            "contentUpdatedAt.$":
                                            "$.contentUpdatedAt",
                                            "isArchived.$":
                                            "$.isArchived",
                                        }),
                                        output_path="$.Payload")
        map_job = sfn.Map(self,
                          "Notes Map",
                          items_path="$.Payload.id_list",
                          max_concurrency=8)
        get_idf_job = tasks.LambdaInvoke(
            self,
            "Get Inter Document Frequency Job",
            lambda_function=self.step_lambda,
            payload=sfn.TaskInput.from_object({
                "action": "update_idf",
                "notes.$": "$"
            }),
        )
        map_tfidf_job = sfn.Map(self,
                                "TF*IDF Notes Map",
                                items_path="$.Payload.notes",
                                max_concurrency=100)
        get_tfidf_job = tasks.LambdaInvoke(
            self,
            "Get TF*IDF WordCloud Image Job",
            lambda_function=self.step_lambda,
            payload=sfn.TaskInput.from_object({
                "action": "update_tfidf_png",
                "id.$": "$.id",
                "contentUpdatedAt.$": "$.contentUpdatedAt",
                "isArchived.$": "$.isArchived",
            }),
        )

        definition = (enumerate_job.next(
            map_job.iterator(get_tf_job)).next(get_idf_job).next(
                map_tfidf_job.iterator(get_tfidf_job)))
        self.enumerate_statemachine = sfn.StateMachine(
            self,
            "EnumerateStateMachine",
            definition=definition,
            timeout=core.Duration.hours(5),
        )

    def create_update_statemachine(self):
        # update Workflow
        get_note_job = tasks.LambdaInvoke(
            self,
            "Get Note Job",
            lambda_function=self.step_lambda,
            payload=sfn.TaskInput.from_object({
                "action": "get_note_from_url",
                "url.$": "$.url"
            }),
        )
        get_tf_job_update = tasks.LambdaInvoke(
            self,
            "Get Text Frequency Job for Update",
            lambda_function=self.step_lambda,
            payload=sfn.TaskInput.from_object({
                "action":
                "update_tf",
                "id.$":
                "$.Payload.id",
                "contentUpdatedAt.$":
                "$.Payload.contentUpdatedAt",
                "isArchived.$":
                "$.Payload.isArchived",
            }),
        )
        get_idf_job_update = tasks.LambdaInvoke(
            self,
            "Get Inter Document Frequency Job for Update",
            lambda_function=self.step_lambda,
            payload=sfn.TaskInput.from_object({
                "action":
                "update_idf",
                "id.$":
                "$.Payload.id",
                "contentUpdatedAt.$":
                "$.Payload.contentUpdatedAt",
                "isArchived.$":
                "$.Payload.isArchived",
            }),
        )
        get_tfidf_job_update = tasks.LambdaInvoke(
            self,
            "Get TF*IDF WordCloud Image Job for Update",
            lambda_function=self.step_lambda,
            payload=sfn.TaskInput.from_object({
                "action":
                "update_tfidf_png",
                "id.$":
                "$.Payload.id",
                "contentUpdatedAt.$":
                "$.Payload.contentUpdatedAt",
                "isArchived.$":
                "$.Payload.isArchived",
            }),
        )

        update_note_definition = get_note_job.next(
            get_tf_job_update.next(
                get_idf_job_update.next(get_tfidf_job_update)))

        self.update_note_statemachine = sfn.StateMachine(
            self,
            "UpdateNoteStateMachine",
            definition=update_note_definition,
            timeout=core.Duration.minutes(10),
        )

    def create_unfurl_statemachine(self):
        map_job = sfn.Map(self,
                          "Unfurl Map",
                          items_path="$.links",
                          max_concurrency=10)
        get_note_job = tasks.LambdaInvoke(
            self,
            "Get Note Job for Unfurl",
            lambda_function=self.step_lambda,
            payload=sfn.TaskInput.from_object({
                "action": "get_note_from_url",
                "url.$": "$.url"
            }),
        )
        get_tf_job = tasks.LambdaInvoke(
            self,
            "Get Text Frequency Job for Unfurl",
            lambda_function=self.step_lambda,
            payload=sfn.TaskInput.from_object({
                "action":
                "update_tf",
                "id.$":
                "$.Payload.id",
                "url.$":
                "$.Payload.url",
                "contentUpdatedAt.$":
                "$.Payload.contentUpdatedAt",
                "isArchived.$":
                "$.Payload.isArchived",
            }),
        )
        get_idf_job = tasks.LambdaInvoke(
            self,
            "Get Inter Document Frequency Job for Unfurl",
            lambda_function=self.step_lambda,
            payload=sfn.TaskInput.from_object({
                "action":
                "update_idf",
                "id.$":
                "$.Payload.id",
                "url.$":
                "$.Payload.url",
                "contentUpdatedAt.$":
                "$.Payload.contentUpdatedAt",
                "isArchived.$":
                "$.Payload.isArchived",
            }),
        )
        get_tfidf_job = tasks.LambdaInvoke(
            self,
            "Get TF*IDF WordCloud Image Job for Unfurl",
            lambda_function=self.step_lambda,
            payload=sfn.TaskInput.from_object({
                "action":
                "update_tfidf_png",
                "id.$":
                "$.Payload.id",
                "url.$":
                "$.Payload.url",
                "contentUpdatedAt.$":
                "$.Payload.contentUpdatedAt",
                "isArchived.$":
                "$.Payload.isArchived",
            }),
        )
        unfurl_job = tasks.LambdaInvoke(
            self,
            "Get Attachment Job",
            lambda_function=self.step_lambda,
            payload=sfn.TaskInput.from_object({
                "action": "unfurl",
                "id.$": "$.Payload.id",
                "url.$": "$.Payload.url",
            }),
        )

        get_tf_job.next(get_idf_job.next(get_tfidf_job.next(unfurl_job)))

        choice_job = sfn.Choice(self, "Check for Update")
        choice_job.when(
            sfn.Condition.and_(
                sfn.Condition.is_timestamp("$.Payload.tfidfPngUpdatedAt"),
                sfn.Condition.timestamp_less_than_json_path(
                    "$.Payload.contentUpdatedAt",
                    "$.Payload.tfidfPngUpdatedAt"),
            ),
            unfurl_job,
        ).when(
            sfn.Condition.and_(
                sfn.Condition.is_timestamp("$.Payload.tfTsvUpdatedAt"),
                sfn.Condition.timestamp_less_than_json_path(
                    "$.Payload.contentUpdatedAt", "$.Payload.tfTsvUpdatedAt"),
            ),
            get_tfidf_job,
        ).otherwise(get_tf_job)

        unfurl_definition = map_job.iterator(get_note_job.next(choice_job))
        self.unfurl_statemachine = sfn.StateMachine(
            self,
            "UnfurlStateMachine",
            definition=unfurl_definition,
            timeout=core.Duration.minutes(20),
            state_machine_type=sfn.StateMachineType.EXPRESS,
            logs=sfn.LogOptions(
                destination=logs.LogGroup(self, "UnfurlStateMachineLogGroup"),
                level=sfn.LogLevel.ERROR,
            ),
        )

    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        self.ssm_signing_secret = ssm.StringParameter(self,
                                                      "SLACK_SIGNING_SECRET",
                                                      string_value="xxxx")
        self.ssm_bot_token = ssm.StringParameter(self,
                                                 "SLACK_BOT_TOKEN",
                                                 string_value="xoxb-xxxx")
        self.ssm_kibela_team = ssm.StringParameter(self,
                                                   "KIBELA_TEAM",
                                                   string_value="teamname")
        self.ssm_kibela_token = ssm.StringParameter(self,
                                                    "KIBELA_TOKEN",
                                                    string_value="secret/xxxx")

        self.public_s3 = s3.Bucket(self,
                                   "PublicS3Bucket",
                                   public_read_access=True)

        self.private_s3 = s3.Bucket(self, "PrivateS3Bucket")

        self.step_lambda = lambda_.DockerImageFunction(
            self,
            "StepFunction",
            code=lambda_.DockerImageCode.from_image_asset(
                path.join(path.dirname(__file__), "../wordcloud-app"),
                cmd=["app.handler"],
                entrypoint=["/usr/local/bin/python", "-m", "awslambdaric"],
            ),
            environment={
                "SSM_KIBELA_TEAM": self.ssm_kibela_team.parameter_name,
                "SSM_KIBELA_TOKEN": self.ssm_kibela_token.parameter_name,
                "S3_PUBLIC": self.public_s3.bucket_name,
                "S3_PRIVATE": self.private_s3.bucket_name,
            },
            log_retention=logs.RetentionDays.FIVE_DAYS,
            timeout=core.Duration.seconds(600),
            memory_size=2048,
        )

        self.create_enumerate_statemachine()
        self.create_update_statemachine()
        self.create_unfurl_statemachine()

        self.bolt_lambda = PythonFunction(
            self,
            "BoltFunction",
            entry=path.join(path.dirname(__file__), "../bolt-app/app"),
            index="app.py",
            handler="handler",
            runtime=lambda_.Runtime.PYTHON_3_8,
            environment={
                # "SLACK_SIGNING_SECRET": self.secret.secret_value_from_json(
                #     "SLACK_SIGNING_SECRET"
                # ).to_string(),
                # "SLACK_BOT_TOKEN": self.secret.secret_value_from_json(
                #     "SLACK_BOT_TOKEN"
                # ).to_string(),
                "SSM_SLACK_SIGNING_SECRET":
                self.ssm_signing_secret.parameter_name,
                "SSM_SLACK_BOT_TOKEN":
                self.ssm_bot_token.parameter_name,
                "UPDATE_STATEMACHINE_ARN":
                self.update_note_statemachine.state_machine_arn,
                "UNFURL_STATEMACHINE_ARN":
                self.unfurl_statemachine.state_machine_arn,
            },
            log_retention=logs.RetentionDays.FIVE_DAYS,
            timeout=core.Duration.seconds(600),
        )
        self.apigw = apigateway.LambdaRestApi(self,
                                              "BoltRestGw",
                                              handler=self.bolt_lambda)

        self.ssm_kibela_team.grant_read(self.step_lambda)
        self.ssm_kibela_token.grant_read(self.step_lambda)
        self.public_s3.grant_read_write(self.step_lambda)
        self.public_s3.grant_delete(self.step_lambda)
        self.private_s3.grant_read_write(self.step_lambda)
        self.private_s3.grant_delete(self.step_lambda)

        self.bolt_lambda.add_to_role_policy(
            iam.PolicyStatement(resources=["*"],
                                actions=["lambda:InvokeFunction"]))
        self.ssm_signing_secret.grant_read(self.bolt_lambda)
        self.ssm_bot_token.grant_read(self.bolt_lambda)

        self.update_note_statemachine.grant_start_execution(self.bolt_lambda)
        self.unfurl_statemachine.grant(self.bolt_lambda,
                                       "states:StartSyncExecution")