Example #1
  def add_states_schedule(self, action: str, schedule: events.Schedule, payload: Optional[Mapping[str, Any]] = None) -> None:
    """
    Creates a collection schedule
    """
    if payload is None:
      payload = {}

    if 'Action' not in payload:
      payload['Action'] = action

    # Define the long running process workflow...
    name_prefix = 'Fsi{}-Collector_{}'.format(self.resources.landing_zone.zone_name, action)

    long_running_process = FsiLongRunningCollectionProcess(self, name_prefix,
      action_name=action,
      resources=self.resources,
      function=self.function)

    # Create schedules...
    events.Rule(self, action + 'Rule',
      rule_name=name_prefix,
      description='Fsi Collector ' + action,
      schedule=schedule,
      # schedule=events.Schedule.rate(core.Duration.minutes(1)),
      targets=[
        targets.SfnStateMachine(
          machine=long_running_process.state_machine,
          dead_letter_queue=sqs.Queue(self, '{}_dlq'.format(name_prefix),
            queue_name='{}_dlq'.format(name_prefix),
            removal_policy=core.RemovalPolicy.DESTROY),
          input=events.RuleTargetInput.from_object({
            'Payload': payload
          }))
      ])
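
A minimal call sketch for add_states_schedule, assuming it is invoked from the same stack class shown above; the action name, rate, and payload values are illustrative only:

    # hypothetical caller inside the same stack class
    self.add_states_schedule(
      'RunCollector',
      events.Schedule.rate(core.Duration.hours(1)),
      payload={'Tier': 'intraday'})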
Example #2
    def __init__(self, scope: Construct, id: str, custom_function_es: IFunction, custom_function_rds: IFunction, state_machine: IStateMachine, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        custom_rule_es = config.CustomRule(self, "Custom_es",
            configuration_changes=True,
            lambda_function=custom_function_es,
            config_rule_name=constants.CONFIG_RULE_ES_PUBLIC)
        custom_rule_es.scope_to_resource("AWS::Elasticsearch::Domain")

        custom_rule_rds = config.CustomRule(self, "Custom_rds",
            configuration_changes=True,
            lambda_function=custom_function_rds,
            config_rule_name=constants.CONFIG_RULE_RDS_PUBLIC)
        custom_rule_rds.scope_to_resource("AWS::RDS::DBInstance")

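        # Match Config evaluation results (PutEvaluations API calls) that report
        # NON_COMPLIANT for the two resource types scoped above.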
        rule_detail = {
            "requestParameters": {
                "evaluations": {
                    "complianceType": ["NON_COMPLIANT"],
                    "complianceResourceType": ["AWS::Elasticsearch::Domain", "AWS::RDS::DBInstance"]
                }
            }
        }
        event_pattern = events.EventPattern(source=["aws.config"], detail=rule_detail)

        events.Rule(self, 'ComplianceCustomRule',
                    enabled=True,
                    event_pattern=event_pattern,
                    targets=[targets.SfnStateMachine(state_machine)])
Example #3
    def cron(self, name: str, cron: dict, *, input: aws_events.RuleTargetInput = None):
        """Schedule CRON-like events for your state machine.

        Args:
            name (str): Construct id for the EventBridge rule.
            cron (dict): Keyword arguments forwarded to aws_events.Schedule.cron().
            input (aws_events.RuleTargetInput, optional): Input passed to the state machine execution. Defaults to None.
        """
        rule = aws_events.Rule(self, name, schedule=aws_events.Schedule.cron(**cron))
        target = aws_events_targets.SfnStateMachine(
            self.sm,
            input=input
        )
        rule.add_target(target)
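
A short usage sketch for cron(), assuming the construct that defines it (and holds self.sm) has been instantiated as workflow; the rule name, cron fields, and input are illustrative only:

    workflow.cron(
        "NightlyTrigger",
        {"minute": "0", "hour": "2"},  # forwarded to aws_events.Schedule.cron(**cron)
        input=aws_events.RuleTargetInput.from_object({"mode": "nightly"}))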
Example #4
    def __init__(self, app: core.Construct, stack_name: str,
                 batch_job_definition: aws_batch.JobDefinition,
                 batch_job_queue: aws_batch.JobQueue):
        super().__init__(scope=app, id=f"{stack_name}-invoke")

        # ============= #
        # StepFunctions #
        # ============= #
        # Ref::{keyword} placeholders in the command are substituted from the Step Functions input
        command_overrides = ["python", "__init__.py", "--time", "Ref::time"]

        batch_task = aws_sfn_tasks.BatchSubmitJob(
            scope=self,
            id=f"sfn_batch_job",
            job_definition=batch_job_definition,
            job_name=f"sfn_batch_job",
            job_queue=batch_job_queue,
            container_overrides=aws_sfn_tasks.BatchContainerOverrides(
                command=command_overrides),
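            # {"time.$": "$.time"} exposes the execution input's $.time as the Batch job
            # parameter "time", which fills the Ref::time placeholder in the command above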
            payload=aws_sfn.TaskInput.from_object({"time.$": "$.time"}))

        # A single-step definition for Step Functions
        definition = batch_task

        sfn_daily_process = aws_sfn.StateMachine(
            scope=self,
            id="step_functions",
            definition=definition)

        # ================ #
        # CloudWatch Event #
        # ================ #

        # Run every day at 21:30 JST
        # See https://docs.aws.amazon.com/lambda/latest/dg/tutorial-scheduled-events-schedule-expressions.html
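        # EventBridge cron expressions are evaluated in UTC: 12:30 UTC == 21:30 JST (UTC+9)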
        events_daily_process = aws_events.Rule(
            scope=self,
            id=f"DailySFnProcess",
            schedule=aws_events.Schedule.cron(minute="30",
                                              hour="12",
                                              month='*',
                                              day="*",
                                              year='*'),
        )
        events_daily_process.add_target(
            aws_events_targets.SfnStateMachine(sfn_daily_process))
Example #5
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The start of the image pipeline
        imageBucket = aws_s3.Bucket(self, "imageBucket")

        # Capture API activity with a trail
        imageBucketTrail = aws_cloudtrail.Trail(self,
                                                "imageBucketTrail",
                                                is_multi_region_trail=False)

        # Restrict to S3 data-plane events
        imageBucketTrail.add_s3_event_selector(
            include_management_events=False,
            prefixes=[f"{imageBucket.bucket_arn}/"],
            read_write_type=aws_cloudtrail.ReadWriteType.WRITE_ONLY)

        # Filter to just PutObject and CopyObject events
        imageBucketRule = aws_events.Rule(
            self,
            "imageBucketRule",
            event_pattern={
                "source": ["aws.s3"],
                "detail": {
                    "eventSource": ["s3.amazonaws.com"],
                    "eventName": ["PutObject", "CopyObject"],
                    "requestParameters": {
                        "bucketName": [imageBucket.bucket_name]
                    }
                }
            })

        #--
        #  Lambda Layers
        #--------------------#

        opencvLayer = aws_lambda.LayerVersion(
            self,
            'opencvLayer',
            code=aws_lambda.AssetCode('layers/opencvLayer'),
            compatible_runtimes=[aws_lambda.Runtime.PYTHON_3_6])

        boto3Layer = aws_lambda.LayerVersion(
            self,
            'boto3Layer',
            code=aws_lambda.AssetCode('layers/boto3Layer'),
            compatible_runtimes=[aws_lambda.Runtime.PYTHON_3_6])

        #--
        #  Lambda Functions
        #--------------------#

        # Gather info about an image: name, extension, etc.
        getImageInfoFunc = aws_lambda.Function(
            self,
            "getImageInfoFunc",
            code=aws_lambda.AssetCode('functions/getImageInfoFunc'),
            handler="lambda.handler",
            runtime=aws_lambda.Runtime.PYTHON_3_6)

        # The home for the website
        webBucket = aws_s3.Bucket(self,
                                  "webBucket",
                                  website_index_document='index.html')

        # Copy the image to the web bucket
        copyImageFunc = aws_lambda.Function(
            self,
            "copyImageFunc",
            code=aws_lambda.AssetCode('functions/copyImageFunc'),
            handler="lambda.handler",
            runtime=aws_lambda.Runtime.PYTHON_3_6,
            layers=[boto3Layer],
            environment={
                'OUTPUTBUCKET': webBucket.bucket_name,
                'OUTPUTPREFIX': 'images/'
            })

        # Grant permissions to read from the source and write to the destination
        imageBucket.grant_read(copyImageFunc)
        webBucket.grant_write(copyImageFunc)

        # Create a thumbnail of the image and place in the web bucket
        createThumbnailFunc = aws_lambda.Function(
            self,
            "createThumbnailFunc",
            code=aws_lambda.AssetCode('functions/createThumbnailFunc'),
            handler="lambda.handler",
            runtime=aws_lambda.Runtime.PYTHON_3_6,
            layers=[boto3Layer, opencvLayer],
            timeout=core.Duration.seconds(10),
            memory_size=256,
            environment={
                'OUTPUTBUCKET': webBucket.bucket_name,
                'OUTPUTPREFIX': 'images/'
            })

        # Grant permissions to read from the source and write to the destination
        imageBucket.grant_read(createThumbnailFunc)
        webBucket.grant_write(createThumbnailFunc)

        # Store page information
        pageTable = aws_dynamodb.Table(
            self,
            'pageTable',
            partition_key={
                'name': 'pageName',
                'type': aws_dynamodb.AttributeType.STRING
            },
            billing_mode=aws_dynamodb.BillingMode.PAY_PER_REQUEST,
            stream=aws_dynamodb.StreamViewType.NEW_IMAGE)

        # Save page and image information
        updatePageInfoFunc = aws_lambda.Function(
            self,
            "updatePageInfoFunc",
            code=aws_lambda.AssetCode('functions/updatePageInfoFunc'),
            handler="lambda.handler",
            runtime=aws_lambda.Runtime.PYTHON_3_6,
            layers=[boto3Layer],
            environment={
                'PAGETABLE': pageTable.table_name,
                'PAGEPREFIX': 'posts/'
            })

        # Grant permissions to write to the page table
        pageTable.grant_write_data(updatePageInfoFunc)

        imagePipelineDone = aws_stepfunctions.Succeed(self,
                                                      "Done processing image")

        updatePageInfoJob = aws_stepfunctions.Task(
            self,
            'Update page info',
            task=aws_stepfunctions_tasks.InvokeFunction(updatePageInfoFunc))
        updatePageInfoJob.next(imagePipelineDone)

        copyImageJob = aws_stepfunctions.Task(
            self,
            'Copy image',
            task=aws_stepfunctions_tasks.InvokeFunction(copyImageFunc))

        createThumbnailJob = aws_stepfunctions.Task(
            self,
            'Create thumbnail',
            task=aws_stepfunctions_tasks.InvokeFunction(createThumbnailFunc))

        # These tasks can be done in parallel
        processImage = aws_stepfunctions.Parallel(self,
                                                  'Process image',
                                                  result_path="$.images")

        processImage.branch(copyImageJob)
        processImage.branch(createThumbnailJob)
        processImage.next(updatePageInfoJob)

        # Results of file extension check
        notPng = aws_stepfunctions.Succeed(self, "Not a PNG")

        # Verify the file extension
        checkForPng = aws_stepfunctions.Choice(self, 'Is a PNG?')
        checkForPng.when(
            aws_stepfunctions.Condition.string_equals('$.extension', 'png'),
            processImage)
        checkForPng.otherwise(notPng)

        # A single image pipeline job for testing
        getImageInfoJob = aws_stepfunctions.Task(
            self,
            'Get image info',
            task=aws_stepfunctions_tasks.InvokeFunction(getImageInfoFunc))
        getImageInfoJob.next(checkForPng)

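        # Pipeline: get image info -> "Is a PNG?" -> (copy image + create thumbnail in parallel) -> update page info -> done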
        # Configure the image pipeline and starting state
        imagePipeline = aws_stepfunctions.StateMachine(
            self, "imagePipeline", definition=getImageInfoJob)

        # Matching events start the image pipeline
        imageBucketRule.add_target(
            aws_events_targets.SfnStateMachine(
                imagePipeline,
                input=aws_events.RuleTargetInput.from_event_path(
                    "$.detail.requestParameters")))
Example #6
    def __init__(self, scope: Construct, construct_id: str, env,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, env=env, **kwargs)

        rg_property = network_fw.CfnRuleGroup.RuleGroupProperty(
            rule_variables=None,
            rules_source=network_fw.CfnRuleGroup.RulesSourceProperty(
                stateless_rules_and_custom_actions=network_fw.CfnRuleGroup.
                StatelessRulesAndCustomActionsProperty(stateless_rules=[
                    network_fw.CfnRuleGroup.StatelessRuleProperty(
                        priority=10,
                        rule_definition=network_fw.CfnRuleGroup.
                        RuleDefinitionProperty(
                            actions=["aws:drop"],
                            match_attributes=network_fw.CfnRuleGroup.
                            MatchAttributesProperty(destinations=[
                                network_fw.CfnRuleGroup.AddressProperty(
                                    address_definition="127.0.0.1/32")
                            ])))
                ])))

        nf_rule_group = network_fw.CfnRuleGroup(
            scope=self,
            id='GuardDutyNetworkFireWallRuleGroup',
            capacity=100,
            rule_group_name='guardduty-network-firewall',
            type='STATELESS',
            description='Guard Duty network firewall rule group',
            tags=[CfnTag(key='Name', value='cfn.rule-group.stack')],
            rule_group=rg_property)
        """ https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-rule-dlq.html#dlq-considerations """
        dlq_statemachine = sqs.Queue(self,
                                     'DLQStateMachine',
                                     queue_name='dlq_state_machine')

        guardduty_firewall_ddb = ddb.Table(
            scope=self,
            id=f'GuarddutyFirewallDDB',
            table_name='GuardDutyFirewallDDBTable',
            removal_policy=RemovalPolicy.DESTROY,
            partition_key=ddb.Attribute(name='HostIp',
                                        type=ddb.AttributeType.STRING),
            billing_mode=ddb.BillingMode.PAY_PER_REQUEST)
        """ IAM role for ddb permission """
        nf_iam_role = iam.Role(
            self,
            'DDBRole',
            role_name=f'ddb-nf-role-{env.region}',
            assumed_by=iam.ServicePrincipal(service='lambda.amazonaws.com'))

        nf_iam_role.add_to_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                resources=["arn:aws:logs:*:*:*"],
                                actions=[
                                    "logs:CreateLogGroup",
                                    "logs:CreateLogStream", "logs:PutLogEvents"
                                ]))

        nf_iam_role.add_to_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                resources=[
                                    guardduty_firewall_ddb.table_arn,
                                    f"{guardduty_firewall_ddb.table_arn}/*"
                                ],
                                actions=[
                                    "dynamodb:PutItem", "dynamodb:GetItem",
                                    "dynamodb:Scan"
                                ]))

        nf_iam_role.add_to_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                resources=[nf_rule_group.ref, f"{nf_rule_group.ref}/*"],
                actions=[
                    "network-firewall:DescribeRuleGroup",
                    "network-firewall:UpdateRuleGroup"
                ]))

        record_ip_in_db = _lambda.Function(
            self,
            'RecordIpInDB',
            function_name='record-ip-in-ddb',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset('lambda_fns'),
            handler='addIPToDDB.handler',
            environment=dict(ACLMETATABLE=guardduty_firewall_ddb.table_name),
            role=nf_iam_role)
        """
        https://docs.amazonaws.cn/en_us/eventbridge/latest/userguide/eb-event-patterns-content-based-filtering.html
        """
        record_ip_task = step_fn_task.LambdaInvoke(
            self,
            'RecordIpDDBTask',
            lambda_function=record_ip_in_db,
            payload=step_fn.TaskInput.from_object({
                "comment":
                "Relevant fields from the GuardDuty / Security Hub finding",
                "HostIp.$":
                "$.detail.findings[0].ProductFields.aws/guardduty/service/action/networkConnectionAction/remoteIpDetails/ipAddressV4",
                "Timestamp.$":
                "$.detail.findings[0].ProductFields.aws/guardduty/service/eventLastSeen",
                "FindingId.$": "$.id",
                "AccountId.$": "$.account",
                "Region.$": "$.region"
            }),
            result_path='$',
            payload_response_only=True)

        firewall_update_rule = _lambda.Function(
            scope=self,
            id='GuardDutyUpdateNetworkFirewallRule',
            function_name='guardduty-update-networkfirewall-rule-group',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset('lambda_fns'),
            handler='updateNetworkFireWall.handler',
            environment=dict(
                FIREWALLRULEGROUP=nf_rule_group.ref,
                RULEGROUPPRI='30000',
                CUSTOMACTIONNAME='GuardDutytoFirewall',
                CUSTOMACTIONVALUE='guardduty-update-networkfirewall-rule-group'),
            role=nf_iam_role)

        firewall_update_rule_task = step_fn_task.LambdaInvoke(
            self,
            'FirewallUpdateRuleTask',
            lambda_function=firewall_update_rule,
            input_path='$',
            result_path='$',
            payload_response_only=True)

        firewall_no_update_job = step_fn.Pass(self, 'No Firewall change')
        notify_failure_job = step_fn.Fail(self,
                                          'NotifyFailureJob',
                                          cause='Any Failure',
                                          error='Unknown')

        send_to_slack = _lambda.Function(
            scope=self,
            id='SendAlertToSlack',
            function_name='guardduty-networkfirewall-to-slack',
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler="sendSMSToSlack.handler",
            code=_lambda.Code.from_asset('lambda_fns'))

        send_slack_task = step_fn_task.LambdaInvoke(
            scope=self,
            id='LambdaToSlackDemo',
            lambda_function=send_to_slack,
            input_path='$',
            result_path='$')

        is_new_ip = step_fn.Choice(self, "New IP?")
        is_block_succeed = step_fn.Choice(self, "Block sucessfully?")

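        # Flow: record IP in DynamoDB -> "New IP?" -> update the firewall rule group ->
        # "Block succeeded?" -> notify Slack (or fail); already-known IPs fall through to a Pass state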
        definition = step_fn.Chain \
            .start(record_ip_task
                   .add_retry(errors=["States.TaskFailed"],
                              interval=Duration.seconds(2),
                              max_attempts=2)
                   .add_catch(errors=["States.ALL"], handler=notify_failure_job)) \
            .next(is_new_ip
                  .when(step_fn.Condition.boolean_equals('$.NewIP', True),
                        firewall_update_rule_task
                            .add_retry(errors=["States.TaskFailed"],
                                       interval=Duration.seconds(2),
                                       max_attempts=2
                                       )
                            .add_catch(errors=["States.ALL"], handler=notify_failure_job)
                            .next(
                                is_block_succeed
                                    .when(step_fn.Condition.boolean_equals('$.Result', False), notify_failure_job)
                                    .otherwise(send_slack_task)
                            )
                        )
                  .otherwise(firewall_no_update_job)
                  )

        guardduty_state_machine = step_fn.StateMachine(
            self,
            'GuarddutyStateMachine',
            definition=definition,
            timeout=Duration.minutes(5),
            state_machine_name='guardduty-state-machine')

        event.Rule(
            scope=self,
            id='EventBridgeCatchIPv4',
            description="Security Hub - GuardDuty findings with remote IP",
            rule_name='guardduty-catch-ipv4',
            event_pattern=event.EventPattern(
                account=['123456789012'],
                detail_type=["GuardDuty Finding"],
                source=['aws.securityhub'],
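                # content-based filter ("exists": True): only match findings that carry a remote IPv4 address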
                detail={
                    "findings": {
                        "ProductFields": {
                            "aws/guardduty/service/action/networkConnectionAction/remoteIpDetails/ipAddressV4":
                            [{
                                "exists": True
                            }]
                        }
                    }
                }),
            targets=[
                event_target.SfnStateMachine(
                    machine=guardduty_state_machine,
                    dead_letter_queue=dlq_statemachine)
            ])
        """ Send other findings to slack """
        send_finding_to_slack = _lambda.Function(
            self,
            'SendFindingToSlack',
            function_name='send-finding-to-slack',
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler="sendFindingToSlack.handler",
            code=_lambda.Code.from_asset('lambda_fns'))

        send_findings_task = step_fn_task.LambdaInvoke(
            self,
            'SendFindingToSlackTask',
            lambda_function=send_finding_to_slack,
            payload=step_fn.TaskInput.from_object({
                "comment":
                "Others fields from the GuardDuty / Security Hub finding",
                "severity.$":
                "$.detail.findings[0].Severity.Label",
                "Account_ID.$":
                "$.account",
                "Finding_ID.$":
                "$.id",
                "Finding_Type.$":
                "$.detail.findings[0].Types",
                "Region.$":
                "$.region",
                "Finding_description.$":
                "$.detail.findings[0].Description"
            }),
            result_path='$')

        slack_failure_job = step_fn.Fail(self,
                                         'SlackNotifyFailureJob',
                                         cause='Any Failure',
                                         error='Unknown')

        finding_definition = step_fn.Chain \
            .start(send_findings_task
                   .add_retry(errors=["States.TaskFailed"],
                              interval=Duration.seconds(2),
                              max_attempts=2)
                   .add_catch(errors=["States.ALL"], handler=slack_failure_job))

        sechub_findings_state_machine = step_fn.StateMachine(
            self,
            'SecHubFindingsStateMachine',
            definition=finding_definition,
            timeout=Duration.minutes(5),
            state_machine_name='sechub-finding-state-machine')

        event.Rule(scope=self,
                   id='EventBridgeFindings',
                   description="Security Hub - GuardDuty findings others",
                   rule_name='others-findings',
                   event_pattern=event.EventPattern(
                       account=['123456789012'],
                       source=['aws.securityhub'],
                       detail_type=['Security Hub Findings - Imported'],
                       detail={"severity": [5, 8]}),
                   targets=[
                       event_target.SfnStateMachine(
                           machine=sechub_findings_state_machine,
                           dead_letter_queue=dlq_statemachine)
                   ])
Example #7
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 redshift_cluster_name: str,
                 user_secret: Secret) -> None:
        super().__init__(scope, id)

        stack = Stack.of(self)

        subprocess.call(
            ['pip', 'install', '-t', 'dwh/dwh_loader_layer/python/lib/python3.8/site-packages', '-r',
             'dwh/dwh_loader/requirements.txt', '--platform', 'manylinux1_x86_64', '--only-binary=:all:',
             '--upgrade'])

        requirements_layer = _lambda.LayerVersion(scope=self,
                                                  id='PythonRequirementsTemplate',
                                                  code=_lambda.Code.from_asset('dwh/dwh_loader_layer'),
                                                  compatible_runtimes=[_lambda.Runtime.PYTHON_3_8])

        dwh_loader_role = _iam.Role(
            self, 'Role',
            assumed_by=_iam.ServicePrincipal('lambda.amazonaws.com')
        )

        dwh_loader_role.add_managed_policy(_iam.ManagedPolicy.from_aws_managed_policy_name(
            'service-role/AWSLambdaBasicExecutionRole'
        ))

        dwh_loader_role.attach_inline_policy(
            _iam.Policy(
                self, 'InlinePolicy',
                statements=[
                    _iam.PolicyStatement(
                        actions=[
                            "redshift-data:ExecuteStatement",
                            "redshift-data:CancelStatement",
                            "redshift-data:ListStatements",
                            "redshift-data:GetStatementResult",
                            "redshift-data:DescribeStatement",
                            "redshift-data:ListDatabases",
                            "redshift-data:ListSchemas",
                            "redshift-data:ListTables",
                            "redshift-data:DescribeTable"
                        ],
                        resources=['*']
                    ),
                    _iam.PolicyStatement(
                        actions=["secretsmanager:GetSecretValue"],
                        resources=[user_secret.secret_arn]
                    ),
                    _iam.PolicyStatement(
                        actions=["redshift:GetClusterCredentials"],
                        resources=[
                            "arn:aws:redshift:*:*:dbname:*/*",
                            "arn:aws:redshift:*:*:dbuser:*/"+_config.Redshift.ETL_USER
                        ]
                    ),
                    _iam.PolicyStatement(
                        effect=_iam.Effect.DENY,
                        actions=["redshift:CreateClusterUser"],
                        resources=["arn:aws:redshift:*:*:dbuser:*/"+_config.Redshift.ETL_USER]
                    ),
                    _iam.PolicyStatement(
                        conditions={
                            'StringLike': {
                                "iam:AWSServiceName": "redshift-data.amazonaws.com"
                            }
                        },
                        actions=["iam:CreateServiceLinkedRole"],
                        resources=["arn:aws:iam::*:role/aws-service-role/redshift-data.amazonaws.com/AWSServiceRoleForRedshift"]
                    ),
                ]
            )
        )

        dwh_loader_function = _lambda.Function(
            self, 'Lambda',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset('dwh/dwh_loader'),
            handler='dwh_loader.handler',
            function_name='dwh-loader',
            environment={
                'CLUSTER_NAME': redshift_cluster_name,
                'PROCEDURE': _config.Redshift.ETL_PROCEDURE,
                'SECRET_ARN': user_secret.secret_arn,
                'DATABASE': _config.Redshift.DATABASE,
                'REGION': core.Aws.REGION,
                'SCHEMA': _config.Redshift.SCHEMA
            },
            layers=[requirements_layer],
            timeout=core.Duration.seconds(30),
            role=dwh_loader_role
        )

        dwh_loader_submit = _sfn_tasks.LambdaInvoke(
            self, 'Submit',
            lambda_function=dwh_loader_function,
            payload_response_only=True
        )

        dwh_loader_wait = _sfn.Wait(
            self, 'Wait',
            time=_sfn.WaitTime.duration(core.Duration.seconds(30))
        )

        dwh_loader_complete = _sfn.Choice(
            self, 'Complete'
        )

        dwh_loader_failed = _sfn.Fail(
            self, 'Fail',
            cause="Redshift Data API statement failed",
            error="$.Result.Error"
        )

        dwh_loader_status = _sfn_tasks.LambdaInvoke(
            self, 'Status',
            lambda_function=dwh_loader_function,
            result_path='$.Result',
            payload_response_only=True
        )

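        # Poll loop: Submit -> Wait 30s -> Status -> Choice: FAILED -> Fail,
        # FINISHED -> Succeed, anything else loops back to Wait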
        definition = dwh_loader_submit \
            .next(dwh_loader_wait) \
            .next(dwh_loader_status) \
            .next(dwh_loader_complete
                  .when(_sfn.Condition.string_equals('$.Result.Status', 'FAILED'), dwh_loader_failed)
                  .when(_sfn.Condition.string_equals('$.Result.Status', 'FINISHED'), _sfn.Succeed(self, 'DwhLoaderSuccess'))
                  .otherwise(dwh_loader_wait))

        dwh_loader_stepfunctions = _sfn.StateMachine(
            self, 'StepFunctions',
            definition=definition,
            timeout=core.Duration.minutes(30)
        )

        step_trigger = _events.Rule(
            self, 'StepTrigger',
            schedule=_events.Schedule.cron(minute='0/30',
                                           hour='*',
                                           month='*',
                                           week_day='*',
                                           year='*')
        )

        step_trigger.add_target(
            _events_targets.SfnStateMachine(
                machine=dwh_loader_stepfunctions,
            )
        )
Example #8
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        email_subscription_parameter = core.CfnParameter(
            self,
            "EmailSubscriptionParameter",
            description="Email Address for Notification Subscription",
            allowed_pattern=
            r'^(([^<>()\[\]\\.,;:\s@"]+(\.[^<>()\[\]\\.,;:\s@"]+)*)|(".+"))@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}])|(([a-zA-Z\-0-9]+\.)+[a-zA-Z]{2,}))$',
            min_length=1,
            constraint_description="Must be a valid email.")
        email_subscription = email_subscription_parameter.value_as_string

        #runtime=aws_lambda.Runtime.PYTHON_3_8

        boto3_lambda_layer = self.create_dependencies_layer(
            id="boto3layer",
            requirements_path="./layers/boto3/requirements.txt",
            output_dir="./layers/boto3")

        is_inline = False

        context_enrichment = self.create_lambda_function(
            boto3_lambda_layer, "./functions/context-enrichment",
            "context_enrichment", is_inline)
        """
    context_enrichment=aws_lambda.Function(
      self,
      "context_enrichment",
      runtime=runtime,
      handler="app.handler",
      code=aws_lambda.AssetCode("./functions/context-enrichment"),
      layers=[boto3_lambda_layer]
    )
    """
        handler_statement = iam.PolicyStatement(actions=[
            "iam:ListRoleTags", "s3:GetBucketTagging", "lambda:ListTags",
            "sqs:ListQueueTags", "kms:ListAliases", "kms:ListResourceTags"
        ],
                                                effect=iam.Effect.ALLOW,
                                                resources=["*"])
        context_enrichment.add_to_role_policy(handler_statement)

        cmk_key = kms.Key(
            self,
            "SNSEncryptionAtRestKey",
            description="SNS Encryption at rest key",
            alias="sns-encryption-at-rest",
            enable_key_rotation=True,
        )

        email_topic = sns.Topic(
            self,
            "AccessAnalyzerNotificationTopic",
            display_name="Access Analyzer Finding Notification Topic",
            master_key=cmk_key)
        email_topic.add_subscription(
            subscriptions.EmailSubscription(email_subscription))

        notification = self.create_lambda_function(
            boto3_lambda_layer, "./functions/notification", "notification",
            is_inline, {"SNS_TOPIC_ARN": email_topic.topic_arn})
        """
    notification=aws_lambda.Function(
      self,
      "notification",
      runtime=runtime,
      handler="app.handler",
      code=aws_lambda.AssetCode("./functions/notification"),
      layers=[boto3_lambda_layer],
      environment={"SNS_TOPIC_ARN":email_topic.topic_arn}
    )
    """
        notification_statement = iam.PolicyStatement(actions=[
            "sns:Publish",
        ],
                                                     effect=iam.Effect.ALLOW,
                                                     resources=["*"])
        notification.add_to_role_policy(notification_statement)
        cmk_key.grant_encrypt_decrypt(notification)

        archive_access_analyzer_finding = self.create_lambda_function(
            boto3_lambda_layer, "./functions/archive-access-analyzer-finding",
            "archive-access-analyzer-finding", is_inline)
        """
    archive_access_analyzer_finding=aws_lambda.Function(
      self,
      "archive-access-analyzer-finding",
      runtime=runtime,
      handler="app.handler",
      code=aws_lambda.AssetCode("./functions/archive-access-analyzer-finding"),
      layers=[boto3_lambda_layer]
    )
    """
        archive_statement = iam.PolicyStatement(actions=[
            "access-analyzer:UpdateFindings",
        ],
                                                effect=iam.Effect.ALLOW,
                                                resources=["*"])
        archive_access_analyzer_finding.add_to_role_policy(archive_statement)

        evaluate_access_analyzer_finding = self.create_lambda_function(
            boto3_lambda_layer, "./functions/evaluate-access-analyzer-finding",
            "evaluate-access-analyzer-finding", is_inline)
        """
    evaluate_access_analyzer_finding=aws_lambda.Function(
      self,
      "evaluate-access-analyzer-finding",
      runtime=runtime,
      handler="app.handler",
      code=aws_lambda.AssetCode("./functions/evaluate-access-analyzer-finding"),
      layers=[boto3_lambda_layer]
    )
    """
        #https://docs.aws.amazon.com/cdk/api/latest/docs/aws-stepfunctions-readme.html
        access_analyzer_handler_task = sfn.Task(
            self,
            "Context Enrichment",
            task=sfn_tasks.InvokeFunction(context_enrichment),
            result_path="$.guid",
        )

        notification_task = sfn.Task(
            self,
            "Send Notification",
            task=sfn_tasks.InvokeFunction(notification),
            result_path="$.guid",
        )

        archive_task = sfn.Task(
            self,
            "Archive Finding",
            task=sfn_tasks.InvokeFunction(archive_access_analyzer_finding),
            result_path="$.guid",
        )

        evaluate_task = sfn.Task(
            self,
            "Evaluate Risk Level",
            task=sfn_tasks.InvokeFunction(evaluate_access_analyzer_finding),
            result_path="$.guid",
        )

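        # Flow: context enrichment -> evaluate risk level -> Choice: ARCHIVE -> archive finding, NOTIFY -> send notification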
        definition = access_analyzer_handler_task \
            .next(evaluate_task) \
            .next(sfn.Choice(self, "Archive?")
                  .when(sfn.Condition.string_equals("$.guid.status", "ARCHIVE"), archive_task)
                  .when(sfn.Condition.string_equals("$.guid.status", "NOTIFY"), notification_task))

        state_machine = sfn.StateMachine(
            self,
            "Access-Analyzer-Automatic-Finding-Archive-State-Machine",
            definition=definition,
            timeout=core.Duration.minutes(5),
        )

        #https://docs.aws.amazon.com/step-functions/latest/dg/tutorial-cloudwatch-events-s3.html
        access_analyzer_finding_rule = aws_events.Rule(
            self,
            "AccessAnalzyerFindingActiveEventRule",
            description="Access Analyzer Finding Event Active",
            enabled=True,
            event_pattern=aws_events.EventPattern(
                source=["aws.access-analyzer"],
                detail_type=["Access Analyzer Finding"],
                detail={"status": ["ACTIVE"]}),
            targets=[
                aws_events_targets.SfnStateMachine(state_machine),
                aws_events_targets.LambdaFunction(context_enrichment)
            ])
Example #9
    def __init__(self, scope: core.Construct, id: str,
                 log_bucket: _s3.Bucket,
                 config_table: _dynamodb.Table,
                 tshirt_size: str,
                 sink_bucket: _s3.Bucket,
                 vpc: _ec2.Vpc,
                 **kwargs) -> None:

        super().__init__(scope, id, **kwargs)

        service_role = _iam.Role(
            self, 'BatchEmrServiceRole',
            assumed_by=_iam.ServicePrincipal('elasticmapreduce.amazonaws.com')
        )

        service_role.add_managed_policy(_iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AmazonElasticMapReduceRole'))

        cluster_role = _iam.Role(
            self, 'BatchEmrClusterRole',
            assumed_by=_iam.ServicePrincipal("ec2.amazonaws.com")
        )

        _iam.Policy(
            self, 'BatchEmrClusterPolicy',
            statements=[
                _iam.PolicyStatement(
                    actions=[
                        "glue:CreateDatabase",
                        "glue:UpdateDatabase",
                        "glue:DeleteDatabase",
                        "glue:GetDatabase",
                        "glue:GetDatabases",
                        "glue:CreateTable",
                        "glue:UpdateTable",
                        "glue:DeleteTable",
                        "glue:GetTable",
                        "glue:GetTables",
                        "glue:GetTableVersions",
                        "glue:CreatePartition",
                        "glue:BatchCreatePartition",
                        "glue:UpdatePartition",
                        "glue:DeletePartition",
                        "glue:BatchDeletePartition",
                        "glue:GetPartition",
                        "glue:GetPartitions",
                        "glue:BatchGetPartition",
                        "glue:CreateUserDefinedFunction",
                        "glue:UpdateUserDefinedFunction",
                        "glue:DeleteUserDefinedFunction",
                        "glue:GetUserDefinedFunction",
                        "glue:GetUserDefinedFunctions",
                        "cloudwatch:PutMetricData",
                        "dynamodb:ListTables",
                        "s3:HeadBucket",
                        "ec2:Describe*",
                    ],
                    resources=['*']
                ),
                _iam.PolicyStatement(
                    actions=['s3:GetObject'],
                    resources=[
                        'arn:aws:s3:::' + ARA_BUCKET_NAME + BINARIES + DataGenConfig.DSDGEN_INSTALL_SCRIPT,
                        'arn:aws:s3:::' + ARA_BUCKET_NAME + BINARIES + DataGenConfig.JAR_FILE
                    ]
                ),
                _iam.PolicyStatement(
                    actions=['s3:PutObject'],
                    resources=[log_bucket.bucket_arn + "/data-generator/*"]
                ),
                _iam.PolicyStatement(
                    actions=[
                        "s3:AbortMultipartUpload",
                        "s3:CreateBucket",
                        "s3:DeleteObject",
                        "s3:GetBucketVersioning",
                        "s3:GetObject",
                        "s3:GetObjectTagging",
                        "s3:GetObjectVersion",
                        "s3:ListBucket",
                        "s3:ListBucketMultipartUploads",
                        "s3:ListBucketVersions",
                        "s3:ListMultipartUploadParts",
                        "s3:PutBucketVersioning",
                        "s3:PutObject",
                        "s3:PutObjectTagging"
                    ],
                    resources=[
                        sink_bucket.bucket_arn + '/*',
                        sink_bucket.bucket_arn

                    ]
                )
            ],
            roles=[cluster_role]
        )

        cluster_role.add_managed_policy(_iam.ManagedPolicy.from_aws_managed_policy_name('AmazonSSMManagedInstanceCore'))

        _iam.CfnInstanceProfile(
            self, 'BatchEmrClusterInstanceProfile',
            roles=[cluster_role.role_name],
            instance_profile_name=cluster_role.role_name
        )

        # Security Groups for the EMR cluster (private subnet)
        # https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-man-sec-groups.html#emr-sg-elasticmapreduce-master-private
        master_sg = _ec2.SecurityGroup(self, 'ElasticMapReduce-Master-Private', vpc=vpc)
        slave_sg = _ec2.SecurityGroup(self, 'ElasticMapReduce-Slave-Private', vpc=vpc)
        service_sg = _ec2.SecurityGroup(self, 'ElasticMapReduce-ServiceAccess', vpc=vpc, allow_all_outbound=False)

        # Service SG used by the proxy instance
        service_sg.add_ingress_rule(master_sg, _ec2.Port.tcp(9443))
        service_sg.add_egress_rule(master_sg, _ec2.Port.tcp(8443))
        service_sg.add_egress_rule(slave_sg, _ec2.Port.tcp(8443))

        # EMR Master
        master_sg.add_ingress_rule(master_sg, _ec2.Port.all_icmp())
        master_sg.add_ingress_rule(master_sg, _ec2.Port.all_tcp())
        master_sg.add_ingress_rule(master_sg, _ec2.Port.all_udp())
        master_sg.add_ingress_rule(slave_sg, _ec2.Port.all_icmp())
        master_sg.add_ingress_rule(slave_sg, _ec2.Port.all_tcp())
        master_sg.add_ingress_rule(slave_sg, _ec2.Port.all_udp())
        master_sg.add_ingress_rule(service_sg, _ec2.Port.tcp(8443))

        # EMR Slave
        slave_sg.add_ingress_rule(master_sg, _ec2.Port.all_icmp())
        slave_sg.add_ingress_rule(master_sg, _ec2.Port.all_tcp())
        slave_sg.add_ingress_rule(master_sg, _ec2.Port.all_udp())
        slave_sg.add_ingress_rule(slave_sg, _ec2.Port.all_icmp())
        slave_sg.add_ingress_rule(slave_sg, _ec2.Port.all_tcp())
        slave_sg.add_ingress_rule(slave_sg, _ec2.Port.all_udp())
        slave_sg.add_ingress_rule(service_sg, _ec2.Port.tcp(8443))

        with open('common/common_cdk/lambda/datagen_config.py', 'r') as f:
            lambda_source = f.read()

        configure_datagen_function = _lambda.SingletonFunction(
            self, 'BatchConfigureDatagenLambda',
            uuid="58a9a222-ff07-11ea-adc1-0242ac120002",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.inline(lambda_source),
            handler='index.handler',
            function_name='datagen-config',
            environment={
                'TABLE_NAME': config_table.table_name,
                'JAR_LOCATION': BINARIES_LOCATION + DataGenConfig.JAR_FILE,
            },
            timeout=core.Duration.seconds(10)
        )

        configure_datagen_function.role.add_to_policy(
            _iam.PolicyStatement(
                actions=[
                    'dynamodb:GetItem',
                    'dynamodb:PutItem',
                ],
                resources=[config_table.table_arn]
            )
        )

        terminate_cluster = _sfn_tasks.EmrTerminateCluster(
            self, 'BatchDeleteCluster',
            cluster_id=_sfn.TaskInput.from_data_at("$.Emr.Cluster.Id").value,
            integration_pattern=_sfn.IntegrationPattern.RUN_JOB,
        )

        terminate_cluster_error = _sfn_tasks.EmrTerminateCluster(
            self, 'BatchDeleteClusterError',
            cluster_id=_sfn.TaskInput.from_data_at("$.Emr.Cluster.Id").value,
            integration_pattern=_sfn.IntegrationPattern.RUN_JOB,
        ).next(_sfn.Fail(self, 'StepFailure'))

        create_cluster = _sfn_tasks.EmrCreateCluster(
            self, "BatchCreateEMRCluster",
            name="BatchDatagenCluster",
            result_path="$.Emr",
            release_label='emr-5.30.1',
            log_uri=log_bucket.s3_url_for_object() + "/data-generator",
            cluster_role=cluster_role,
            service_role=service_role,
            bootstrap_actions=[
                _sfn_tasks.EmrCreateCluster.BootstrapActionConfigProperty(
                    name="dsdgen-install",
                    script_bootstrap_action=_sfn_tasks.EmrCreateCluster.ScriptBootstrapActionConfigProperty(
                        path=BINARIES_LOCATION + DataGenConfig.DSDGEN_INSTALL_SCRIPT,
                    )
                )
            ],
            applications=[
                _sfn_tasks.EmrCreateCluster.ApplicationConfigProperty(
                    name="spark"
                ),
                _sfn_tasks.EmrCreateCluster.ApplicationConfigProperty(
                    name="hadoop"
                )
            ],
            instances=_sfn_tasks.EmrCreateCluster.InstancesConfigProperty(
                emr_managed_master_security_group=master_sg.security_group_id,
                emr_managed_slave_security_group=slave_sg.security_group_id,
                service_access_security_group=service_sg.security_group_id,
                ec2_subnet_ids=vpc.select_subnets().subnet_ids,
                instance_fleets=[
                    _sfn_tasks.EmrCreateCluster.InstanceFleetConfigProperty(
                        instance_fleet_type=_sfn_tasks.EmrCreateCluster.InstanceRoleType.MASTER,
                        instance_type_configs=[
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m5.xlarge',
                                weighted_capacity=1
                            ),
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m5a.xlarge',
                                weighted_capacity=1
                            ),
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m4.xlarge',
                                weighted_capacity=1
                            ),
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m5d.xlarge',
                                weighted_capacity=1
                            ),
                        ],
                        launch_specifications=_sfn_tasks.EmrCreateCluster.InstanceFleetProvisioningSpecificationsProperty(
                            spot_specification=_sfn_tasks.EmrCreateCluster.SpotProvisioningSpecificationProperty(
                                timeout_action=_sfn_tasks.EmrCreateCluster.SpotTimeoutAction.SWITCH_TO_ON_DEMAND,
                                timeout_duration_minutes=5
                            )
                        ),
                        target_on_demand_capacity=0,
                        target_spot_capacity=1
                    ),
                    _sfn_tasks.EmrCreateCluster.InstanceFleetConfigProperty(
                        instance_fleet_type=_sfn_tasks.EmrCreateCluster.InstanceRoleType.CORE,
                        instance_type_configs=[
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m5.xlarge',
                                weighted_capacity=1
                            ),
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m5.2xlarge',
                                weighted_capacity=2
                            ),
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m5a.xlarge',
                                weighted_capacity=1
                            ),
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m5a.2xlarge',
                                weighted_capacity=2
                            ),
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m4.xlarge',
                                weighted_capacity=1
                            )
                        ],
                        launch_specifications=_sfn_tasks.EmrCreateCluster.InstanceFleetProvisioningSpecificationsProperty(
                            spot_specification=_sfn_tasks.EmrCreateCluster.SpotProvisioningSpecificationProperty(
                                timeout_action=_sfn_tasks.EmrCreateCluster.SpotTimeoutAction.SWITCH_TO_ON_DEMAND,
                                timeout_duration_minutes=5
                            )
                        ),
                        target_on_demand_capacity=0,
                        target_spot_capacity=DataGenConfig.BATCH_CLUSTER_SIZE[tshirt_size]

                    )
                ]
            )
        ).add_catch(handler=terminate_cluster_error, result_path="$.error")

        configure_datagen = _sfn_tasks.LambdaInvoke(
            self, "BatchConfigureDatagenTask",
            lambda_function=configure_datagen_function,
            payload=_sfn.TaskInput.from_text('{'
                                             '"Param": "batch_iterator",'
                                             '"Module": "batch",'
                                             '"SinkBucket": "'+sink_bucket.s3_url_for_object()+'",'
                                             '"Parallelism": "'+str(int(DataGenConfig.BATCH_DATA_SIZE[tshirt_size])*2)+'",'
                                             '"DataSize": "'+DataGenConfig.BATCH_DATA_SIZE[tshirt_size]+'",'
                                             '"TmpBucket": "fake-bucket"'
                                             '}'),
            result_path='$.Config'
        ).add_catch(handler=terminate_cluster_error, result_path="$.error")

        add_datagen_step = _sfn.CustomState(
            self, 'BatchAddDataGenStep',
            state_json={
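                # Raw ASL for the EMR addStep.sync service integration; the "Next" and
                # "Catch" targets below must name the sibling states ("BatchUpdateIterator",
                # "BatchDeleteClusterError") exactly, since CDK uses the construct id as the state name.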
                "Type": "Task",
                "Resource": "arn:aws:states:::elasticmapreduce:addStep.sync",
                "Parameters": {
                    "ClusterId.$": "$.Emr.Cluster.Id",
                    "Step": {
                        "Name": "DatagenStep",
                        "ActionOnFailure": "CONTINUE",
                        "HadoopJarStep": {
                            "Jar": "command-runner.jar",
                            "Args.$": "$.Config.Payload.StepParam"
                        }
                    }
                },
                "ResultPath": "$.Step",
                "Next": "BatchUpdateIterator",
                "Catch": [
                    {
                        "ErrorEquals": ["States.ALL"],
                        "Next": "BatchDeleteClusterError",
                        "ResultPath": "$.error"
                    }
                ]
            }
        )

        update_iterator = _sfn_tasks.DynamoUpdateItem(
            self, 'BatchUpdateIterator',
            table=config_table,
            key={
                'param': _sfn_tasks.DynamoAttributeValue.from_string('batch_iterator')
            },
            update_expression='SET iterator = if_not_exists(iterator, :start) + :inc',
            expression_attribute_values={
                ":inc": _sfn_tasks.DynamoAttributeValue.from_number(1),
                ":start": _sfn_tasks.DynamoAttributeValue.from_number(0)
            },
            result_path=_sfn.JsonPath.DISCARD
        )

        definition = configure_datagen \
            .next(create_cluster) \
            .next(add_datagen_step) \
            .next(update_iterator) \
            .next(terminate_cluster)

        datagen_stepfunctions = _sfn.StateMachine(
            self, "BatchDataGenStepFunctions",
            definition=definition,
            timeout=core.Duration.minutes(30)
        )

        datagen_stepfunctions.add_to_role_policy(
            _iam.PolicyStatement(
                actions=[
                    'elasticmapreduce:AddJobFlowSteps',
                    'elasticmapreduce:DescribeStep'
                ],
                resources=['*']
            )
        )
        datagen_stepfunctions.add_to_role_policy(
            _iam.PolicyStatement(
                actions=[
                    "iam:CreateServiceLinkedRole",
                    "iam:PutRolePolicy"
                ],
                resources=["arn:aws:iam::*:role/aws-service-role/elasticmapreduce.amazonaws.com*/AWSServiceRoleForEMRCleanup*"],
                conditions={
                    "StringLike": {
                        "iam:AWSServiceName": [
                            "elasticmapreduce.amazonaws.com",
                            "elasticmapreduce.amazonaws.com.cn"
                        ]
                    }
                }
            )
        )

        step_trigger = _events.Rule(
            self, 'BatchSteptrigger',
            schedule=_events.Schedule.cron(minute='0/30',
                                           hour='*',
                                           month='*',
                                           week_day='*',
                                           year='*')
        )

        step_trigger.add_target(_events_targets.SfnStateMachine(machine=datagen_stepfunctions))

        with open('common/common_cdk/lambda/stepfunctions_trigger.py', 'r') as f:
            lambda_source = f.read()

        stepfunctions_trigger_lambda = _lambda.SingletonFunction(
            self, 'BatchStepFunctionsTriggerLambda',
            uuid="9597f6f2-f840-11ea-adc1-0242ac120002",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.inline(lambda_source),
            handler='index.handler',
            function_name='stepfunctions-batch-datagen-trigger'
        )

        stepfunctions_trigger_lambda.role.add_to_policy(
            _iam.PolicyStatement(
                actions=["states:StartExecution"],
                resources=['*']
            )
        )

        trigger_step_lambda_provider = _custom_resources.Provider(
            self, 'StepFunctionsTriggerLambdaProvider',
            on_event_handler=stepfunctions_trigger_lambda
        )

        core.CustomResource(
            self, 'StepFunctionsTrigger',
            service_token=trigger_step_lambda_provider.service_token,
            properties={
                "stepArn": datagen_stepfunctions.state_machine_arn
            }
        )

        # terminate clusters
        with open('common/common_cdk/lambda/stepfunctions_terminate_emr.py', 'r') as f:
            lambda_source = f.read()

        sfn_terminate = _lambda.SingletonFunction(
            self, 'StepFuncTerminateBatch',
            uuid='58a9a422-ff07-11ea-adc1-0242ac120002',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.inline(lambda_source),
            handler='index.handler',
            timeout=core.Duration.minutes(5)
        )

        sfn_terminate.role.add_to_policy(
            _iam.PolicyStatement(
                actions=[
                    'elasticmapreduce:ListClusters',
                    'elasticmapreduce:TerminateJobFlows',
                    'states:ListStateMachines',
                    'states:ListExecutions',
                    'states:StopExecution'
                ],
                resources=['*']
            )
        )

        sfn_terminate_provider = _custom_resources.Provider(
            self, 'StepFuncTerminateBatchLambdaProvider',
            on_event_handler=sfn_terminate
        )

        core.CustomResource(
            self, 'StepFuncTerminateBatchCustomResource',
            service_token=sfn_terminate_provider.service_token,
            properties={
                "state_machine": 'BatchDatagen'
            })
Example #10
    def __init__(self, scope: core.Construct, id: str, log_bucket: _s3.Bucket,
                 config_table: _dynamodb.Table, tshirt_size: str,
                 sink_bucket: _s3.Bucket, web_sale_stream: str,
                 web_customer_stream: str, web_customer_address_stream: str,
                 kinesis_key: _kms.Key, vpc: _ec2.Vpc, **kwargs) -> None:

        super().__init__(scope, id, **kwargs)

        stack = core.Stack.of(self)

        stream_source_bucket = AutoEmptyBucket(
            self,
            'StreamSource',
            bucket_name='ara-stream-source-' + core.Aws.ACCOUNT_ID,
            uuid='95505f50-0276-11eb-adc1-0242ac120002')

        service_role = _iam.Role(
            self,
            'StreamEmrServiceRole',
            assumed_by=_iam.ServicePrincipal('elasticmapreduce.amazonaws.com'))

        service_role.add_managed_policy(
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AmazonElasticMapReduceRole'))

        cluster_role = _iam.Role(
            self,
            'StreamEmrClusterRole',
            assumed_by=_iam.ServicePrincipal("ec2.amazonaws.com"))

        _iam.Policy(
            self,
            'StreamEmrClusterPolicy',
            statements=[
                _iam.PolicyStatement(actions=[
                    "glue:CreateDatabase",
                    "glue:UpdateDatabase",
                    "glue:DeleteDatabase",
                    "glue:GetDatabase",
                    "glue:GetDatabases",
                    "glue:CreateTable",
                    "glue:UpdateTable",
                    "glue:DeleteTable",
                    "glue:GetTable",
                    "glue:GetTables",
                    "glue:GetTableVersions",
                    "glue:CreatePartition",
                    "glue:BatchCreatePartition",
                    "glue:UpdatePartition",
                    "glue:DeletePartition",
                    "glue:BatchDeletePartition",
                    "glue:GetPartition",
                    "glue:GetPartitions",
                    "glue:BatchGetPartition",
                    "glue:CreateUserDefinedFunction",
                    "glue:UpdateUserDefinedFunction",
                    "glue:DeleteUserDefinedFunction",
                    "glue:GetUserDefinedFunction",
                    "glue:GetUserDefinedFunctions",
                    "cloudwatch:PutMetricData",
                    "dynamodb:ListTables",
                    "s3:HeadBucket",
                    "ec2:Describe*",
                ],
                                     resources=['*']),
                _iam.PolicyStatement(
                    actions=['s3:GetObject'],
                    resources=[
                        'arn:aws:s3:::' + ARA_BUCKET_NAME + BINARIES +
                        DataGenConfig.DSDGEN_INSTALL_SCRIPT, 'arn:aws:s3:::' +
                        ARA_BUCKET_NAME + BINARIES + DataGenConfig.JAR_FILE
                    ]),
                _iam.PolicyStatement(
                    actions=['s3:PutObject'],
                    resources=[log_bucket.bucket_arn + "/data-generator/*"]),
                _iam.PolicyStatement(
                    actions=[
                        "s3:AbortMultipartUpload", "s3:CreateBucket",
                        "s3:DeleteObject", "s3:GetBucketVersioning",
                        "s3:GetObject", "s3:GetObjectTagging",
                        "s3:GetObjectVersion", "s3:ListBucket",
                        "s3:ListBucketMultipartUploads",
                        "s3:ListBucketVersions", "s3:ListMultipartUploadParts",
                        "s3:PutBucketVersioning", "s3:PutObject",
                        "s3:PutObjectTagging"
                    ],
                    resources=[
                        sink_bucket.bucket_arn + '/*', sink_bucket.bucket_arn,
                        stream_source_bucket.bucket.bucket_arn + '/*',
                        stream_source_bucket.bucket.bucket_arn
                    ])
            ],
            roles=[cluster_role])

        cluster_role.add_managed_policy(
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                'AmazonSSMManagedInstanceCore'))

        _iam.CfnInstanceProfile(self,
                                'StreamEmrClusterInstanceProfile',
                                roles=[cluster_role.role_name],
                                instance_profile_name=cluster_role.role_name)

        # Security Groups for the EMR cluster (private subnet)
        # https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-man-sec-groups.html#emr-sg-elasticmapreduce-master-private
        master_sg = _ec2.SecurityGroup(self,
                                       'ElasticMapReduce-Master-Private',
                                       vpc=vpc)
        slave_sg = _ec2.SecurityGroup(self,
                                      'ElasticMapReduce-Slave-Private',
                                      vpc=vpc)
        service_sg = _ec2.SecurityGroup(self,
                                        'ElasticMapReduce-ServiceAccess',
                                        vpc=vpc,
                                        allow_all_outbound=False)

        # Service SG used by the proxy instance
        service_sg.add_ingress_rule(master_sg, _ec2.Port.tcp(9443))
        service_sg.add_egress_rule(master_sg, _ec2.Port.tcp(8443))
        service_sg.add_egress_rule(slave_sg, _ec2.Port.tcp(8443))

        # EMR Master
        master_sg.add_ingress_rule(master_sg, _ec2.Port.all_icmp())
        master_sg.add_ingress_rule(master_sg, _ec2.Port.all_tcp())
        master_sg.add_ingress_rule(master_sg, _ec2.Port.all_udp())
        master_sg.add_ingress_rule(slave_sg, _ec2.Port.all_icmp())
        master_sg.add_ingress_rule(slave_sg, _ec2.Port.all_tcp())
        master_sg.add_ingress_rule(slave_sg, _ec2.Port.all_udp())
        master_sg.add_ingress_rule(service_sg, _ec2.Port.tcp(8443))

        # EMR Slave
        slave_sg.add_ingress_rule(master_sg, _ec2.Port.all_icmp())
        slave_sg.add_ingress_rule(master_sg, _ec2.Port.all_tcp())
        slave_sg.add_ingress_rule(master_sg, _ec2.Port.all_udp())
        slave_sg.add_ingress_rule(slave_sg, _ec2.Port.all_icmp())
        slave_sg.add_ingress_rule(slave_sg, _ec2.Port.all_tcp())
        slave_sg.add_ingress_rule(slave_sg, _ec2.Port.all_udp())
        slave_sg.add_ingress_rule(service_sg, _ec2.Port.tcp(8443))

        with open('common/common_cdk/lambda/datagen_config.py', 'r') as f:
            lambda_source = f.read()

        configure_datagen_function = _lambda.SingletonFunction(
            self,
            'StreamConfigureDatagenLambda',
            uuid="a9904dec-01cf-11eb-adc1-0242ac120002",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.inline(lambda_source),
            handler='index.handler',
            function_name='stream-datagen-config',
            environment={
                'TABLE_NAME': config_table.table_name,
                'JAR_LOCATION': BINARIES_LOCATION + DataGenConfig.JAR_FILE,
            },
            timeout=core.Duration.seconds(10))

        configure_datagen_function.role.add_to_policy(
            _iam.PolicyStatement(actions=[
                'dynamodb:GetItem',
                'dynamodb:PutItem',
            ],
                                 resources=[config_table.table_arn]))
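        # datagen_config.py, read above, prepares the arguments that the EMR addStep
        # state later consumes via "$.Config.Payload.StepParam". A hypothetical sketch,
        # assuming the handler looks up the current iterator in DynamoDB and returns
        # the command-runner argument list:
        #
        #   import os
        #   import boto3
        #
        #   def handler(event, context):
        #       table = boto3.resource('dynamodb').Table(os.environ['TABLE_NAME'])
        #       item = table.get_item(Key={'param': event['Param']}).get('Item', {})
        #       iterator = str(item.get('iterator', 0))
        #       return {
        #           'StepParam': [
        #               'spark-submit', os.environ['JAR_LOCATION'], event['Module'],
        #               event['SinkBucket'], event['TmpBucket'], event['DataSize'],
        #               event['Parallelism'], iterator
        #           ]
        #       }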

        emr_cluster = _emr.CfnCluster(
            self,
            'StreamEmrCluster',
            name="StreamDatagenCluster",
            job_flow_role=cluster_role.role_name,
            service_role=service_role.role_name,
            release_label='emr-5.30.1',
            visible_to_all_users=True,
            log_uri=log_bucket.s3_url_for_object() + "/data-generator",
            applications=[
                _emr.CfnCluster.ApplicationProperty(name='hadoop'),
                _emr.CfnCluster.ApplicationProperty(name='spark')
            ],
            bootstrap_actions=[
                _emr.CfnCluster.BootstrapActionConfigProperty(
                    name="dsdgen-install",
                    script_bootstrap_action=_emr.CfnCluster.
                    ScriptBootstrapActionConfigProperty(
                        path=BINARIES_LOCATION +
                        DataGenConfig.DSDGEN_INSTALL_SCRIPT))
            ],
            instances=_emr.CfnCluster.JobFlowInstancesConfigProperty(
                emr_managed_master_security_group=master_sg.security_group_id,
                emr_managed_slave_security_group=slave_sg.security_group_id,
                service_access_security_group=service_sg.security_group_id,
                ec2_subnet_id=vpc.private_subnets[0].subnet_id,
                core_instance_group=_emr.CfnCluster.
                InstanceGroupConfigProperty(instance_count=DataGenConfig.
                                            BATCH_CLUSTER_SIZE[tshirt_size],
                                            instance_type='m5.xlarge'),
                master_instance_group=_emr.CfnCluster.
                InstanceGroupConfigProperty(instance_count=1,
                                            instance_type='m4.large')))

        configure_datagen = _sfn_tasks.LambdaInvoke(
            self,
            "ConfigureDatagenTask",
            lambda_function=configure_datagen_function,
            payload=_sfn.TaskInput.from_text(
                '{'
                '"Param": "stream_iterator",'
                '"Module": "stream",'
                '"SinkBucket": "' + sink_bucket.s3_url_for_object() + '",'
                '"Parallelism": "' +
                str(int(DataGenConfig.STREAM_DATA_SIZE[tshirt_size]) * 2) +
                '",'
                '"DataSize": "' + DataGenConfig.STREAM_DATA_SIZE[tshirt_size] +
                '",'
                '"TmpBucket": "' +
                str(stream_source_bucket.bucket.s3_url_for_object()) + '"'
                '}'),
            result_path='$.Config')

        add_datagen_step = _sfn.CustomState(
            self,
            'StreamAddDataGenStep',
            state_json={
                "Type": "Task",
                "Resource": "arn:aws:states:::elasticmapreduce:addStep.sync",
                "Parameters": {
                    "ClusterId.$": "$.Emr.Cluster.Id",
                    "Step": {
                        "Name": "DatagenStep",
                        "ActionOnFailure": "CONTINUE",
                        "HadoopJarStep": {
                            "Jar": "command-runner.jar",
                            "Args.$": "$.Config.Payload.StepParam"
                        }
                    }
                },
                "ResultPath": "$.Step",
                "Next": "StreamUpdateIterator"
            })

        update_iterator = _sfn_tasks.DynamoUpdateItem(
            self,
            'StreamUpdateIterator',
            table=config_table,
            key={
                'param':
                _sfn_tasks.DynamoAttributeValue.from_string('stream_iterator')
            },
            update_expression=
            'SET iterator = if_not_exists(iterator, :start) + :inc',
            expression_attribute_values={
                ":inc": _sfn_tasks.DynamoAttributeValue.from_number(1),
                ":start": _sfn_tasks.DynamoAttributeValue.from_number(0)
            },
            result_path=_sfn.JsonPath.DISCARD)

        definition = configure_datagen \
            .next(add_datagen_step) \
            .next(update_iterator)

        datagen_stepfunctions = _sfn.StateMachine(
            self,
            "StreamDataGenStepFunctions",
            definition=definition,
            timeout=core.Duration.minutes(30))

        datagen_stepfunctions.add_to_role_policy(
            _iam.PolicyStatement(actions=[
                'elasticmapreduce:AddJobFlowSteps',
                'elasticmapreduce:DescribeStep'
            ],
                                 resources=['*']))

        step_trigger = _events.Rule(self,
                                    'StreamStepTrigger',
                                    schedule=_events.Schedule.cron(
                                        minute='0/10',
                                        hour='*',
                                        month='*',
                                        week_day='*',
                                        year='*'))

        step_trigger.add_target(
            _events_targets.SfnStateMachine(
                machine=datagen_stepfunctions,
                input=_events.RuleTargetInput.from_object({
                    "Emr": {
                        "Cluster": {
                            "Id": core.Fn.ref(emr_cluster.logical_id)
                        }
                    }
                })))

        with open('common/common_cdk/lambda/stepfunctions_trigger.py',
                  'r') as f:
            lambda_source = f.read()

        stepfunctions_trigger_lambda = _lambda.SingletonFunction(
            self,
            'StreamStepFunctionsTriggerLambda',
            uuid="cf042246-01d0-11eb-adc1-0242ac120002",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.inline(lambda_source),
            handler='index.handler',
            function_name='stepfunctions-stream-datagen-trigger')

        stepfunctions_trigger_lambda.role.add_to_policy(
            _iam.PolicyStatement(actions=["states:StartExecution"],
                                 resources=['*']))

        trigger_step_lambda_provider = _custom_resources.Provider(
            self,
            'StreamStepFunctionsTriggerLambdaProvider',
            on_event_handler=stepfunctions_trigger_lambda)

        core.CustomResource(
            self,
            'StreamStepFunctionsTrigger',
            service_token=trigger_step_lambda_provider.service_token,
            properties={"stepArn": datagen_stepfunctions.state_machine_arn})

        with open('common/common_cdk/lambda/stream_generator.py', 'r') as f:
            lambda_source = f.read()

        sale_stream_generator_lambda = _lambda.Function(
            scope=self,
            id='WebSaleStreamGenerator',
            runtime=_lambda.Runtime.PYTHON_3_7,
            memory_size=2048,
            timeout=core.Duration.minutes(15),
            code=_lambda.Code.inline(lambda_source),
            handler='index.lambda_handler',
            environment={
                'REGION': core.Aws.REGION,
                'STREAM_NAME': web_sale_stream
            })

        stream_source_bucket.bucket.add_event_notification(
            _s3.EventType.OBJECT_CREATED,
            _s3_notifications.LambdaDestination(sale_stream_generator_lambda),
            _s3.NotificationKeyFilter(prefix='sale', suffix='csv'))

        sale_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(
                actions=[
                    "s3:DeleteObject",
                    "s3:GetObject",
                    "s3:ListBucket",
                ],
                resources=[
                    stream_source_bucket.bucket.bucket_arn + '/*',
                    stream_source_bucket.bucket.bucket_arn
                ]))

        sale_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(actions=["kinesis:PutRecords"],
                                 resources=[
                                     stack.format_arn(
                                         service='kinesis',
                                         resource='stream',
                                         resource_name=web_sale_stream)
                                 ]))

        sale_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(actions=['kms:GenerateDataKey'],
                                 resources=[
                                     stack.format_arn(
                                         service='kms',
                                         resource='key',
                                         sep='/',
                                         resource_name=kinesis_key.key_id)
                                 ]))
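        # stream_generator.py, read above, backs the sale, customer and address
        # generator functions below. A rough, hypothetical sketch of its handler:
        # read the CSV object that triggered the S3 notification, push its lines to
        # the Kinesis stream named in STREAM_NAME (500 records per PutRecords call),
        # then delete the source object:
        #
        #   import os
        #   import boto3
        #
        #   def lambda_handler(event, context):
        #       s3 = boto3.client('s3')
        #       kinesis = boto3.client('kinesis', region_name=os.environ['REGION'])
        #       for record in event['Records']:
        #           bucket = record['s3']['bucket']['name']
        #           key = record['s3']['object']['key']
        #           body = s3.get_object(Bucket=bucket, Key=key)['Body'].read().decode()
        #           lines = [line for line in body.splitlines() if line]
        #           for i in range(0, len(lines), 500):
        #               kinesis.put_records(
        #                   StreamName=os.environ['STREAM_NAME'],
        #                   Records=[{'Data': line.encode(),
        #                             'PartitionKey': str(abs(hash(line)))}
        #                            for line in lines[i:i + 500]])
        #           s3.delete_object(Bucket=bucket, Key=key)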

        customer_stream_generator_lambda = _lambda.Function(
            scope=self,
            id='WebCustomerStreamGenerator',
            runtime=_lambda.Runtime.PYTHON_3_7,
            memory_size=2048,
            timeout=core.Duration.minutes(15),
            code=_lambda.Code.inline(lambda_source),
            handler='index.lambda_handler',
            environment={
                'REGION': core.Aws.REGION,
                'STREAM_NAME': web_customer_stream
            })

        stream_source_bucket.bucket.add_event_notification(
            _s3.EventType.OBJECT_CREATED,
            _s3_notifications.LambdaDestination(
                customer_stream_generator_lambda),
            _s3.NotificationKeyFilter(prefix='customer', suffix='csv'))

        customer_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(
                actions=[
                    "s3:DeleteObject",
                    "s3:GetObject",
                    "s3:ListBucket",
                ],
                resources=[
                    stream_source_bucket.bucket.bucket_arn + '/*',
                    stream_source_bucket.bucket.bucket_arn
                ]))

        customer_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(actions=["kinesis:PutRecords"],
                                 resources=[
                                     stack.format_arn(
                                         service='kinesis',
                                         resource='stream',
                                         resource_name=web_customer_stream)
                                 ]))

        customer_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(actions=['kms:GenerateDataKey'],
                                 resources=[
                                     stack.format_arn(
                                         service='kms',
                                         resource='key',
                                         sep='/',
                                         resource_name=kinesis_key.key_id)
                                 ]))

        address_stream_generator_lambda = _lambda.Function(
            scope=self,
            id='WebCustomerAddressStreamGenerator',
            runtime=_lambda.Runtime.PYTHON_3_7,
            memory_size=2048,
            timeout=core.Duration.minutes(15),
            code=_lambda.Code.inline(lambda_source),
            handler='index.lambda_handler',
            environment={
                'REGION': core.Aws.REGION,
                'STREAM_NAME': web_customer_address_stream
            })

        stream_source_bucket.bucket.add_event_notification(
            _s3.EventType.OBJECT_CREATED,
            _s3_notifications.LambdaDestination(
                address_stream_generator_lambda),
            _s3.NotificationKeyFilter(prefix='address', suffix='csv'))

        address_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(
                actions=[
                    "s3:DeleteObject",
                    "s3:GetObject",
                    "s3:ListBucket",
                ],
                resources=[
                    stream_source_bucket.bucket.bucket_arn + '/*',
                    stream_source_bucket.bucket.bucket_arn
                ]))

        address_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(
                actions=["kinesis:PutRecords"],
                resources=[
                    stack.format_arn(service='kinesis',
                                     resource='stream',
                                     resource_name=web_customer_address_stream)
                ]))

        address_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(actions=['kms:GenerateDataKey'],
                                 resources=[
                                     stack.format_arn(
                                         service='kms',
                                         resource='key',
                                         sep='/',
                                         resource_name=kinesis_key.key_id)
                                 ]))
Example #11
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here
        pvt_bkt = _s3.Bucket(self, "s3bucket")
        core.Tag.add(pvt_bkt, key="isMonitoredBucket", value="True")

        # Let's create a CloudTrail trail to track S3 data events
        s3_data_event_trail = _cloudtrail.Trail(
            self,
            "s3DataEventTrailId",
            is_multi_region_trail=False,
            include_global_service_events=False,
            enable_file_validation=True)

        # Let's capture S3 data events only for our bucket - TO REDUCE COST
        s3_data_event_trail.add_s3_event_selector(
            prefixes=[f"{pvt_bkt.bucket_arn}/"],
            include_management_events=True,
            read_write_type=_cloudtrail.ReadWriteType.ALL)

        # Defines an AWS Lambda resource
        """
        with open("lambda_src/make_object_private.py", encoding="utf8") as fp:
            make_object_private_fn_handler_code = fp.read()

        remediate_object_acl_fn = _lambda.Function(
            self,
            id='remediateObjAclFn',
            function_name="remediate_object_acl_fn",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.InlineCode(make_object_private_fn_handler_code),
            handler='index.lambda_handler',
            timeout=core.Duration.seconds(10)
            )

        # Lets add the necessary permission for the lambda function
        remediate_object_acl_fn_perms=_iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            resources=[
                "arn:aws:s3:::*",
                ],
            actions=[
                "s3:GetObjectAcl",
                "s3:PutObjectAcl"
            ]
            )
        remediate_object_acl_fn_perms.sid="PutBucketPolicy"
        remediate_object_acl_fn.add_to_role_policy( remediate_object_acl_fn_perms )
        """

        with open("lambda_src/is_object_private.py", encoding="utf8") as fp:
            is_object_private_fn_handler_code = fp.read()

        is_object_private_fn = _lambda.Function(
            self,
            id='isObjPrivateFn',
            function_name="is_object_private_fn",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.InlineCode(is_object_private_fn_handler_code),
            handler='index.lambda_handler',
            timeout=core.Duration.seconds(3))

        # Let's add the necessary permission for the Lambda function
        is_object_private_fn_perms = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            resources=[
                "arn:aws:s3:::*",
            ],
            actions=["s3:GetObjectAcl"])
        is_object_private_fn_perms.sid = "CheckObjectAcl"
        is_object_private_fn.add_to_role_policy(is_object_private_fn_perms)
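        # is_object_private.py is inlined above. Given the "$.is_private" choice in
        # the state machine below and the CloudTrail event shape delivered by the
        # rule at the bottom of this stack, a hypothetical sketch of the handler:
        #
        #   import boto3
        #
        #   def lambda_handler(event, context):
        #       params = event['detail']['requestParameters']
        #       acl = boto3.client('s3').get_object_acl(
        #           Bucket=params['bucketName'], Key=params['key'])
        #       public = any(
        #           grant['Grantee'].get('URI', '').endswith(('AllUsers', 'AuthenticatedUsers'))
        #           for grant in acl['Grants'])
        #       event['is_private'] = not public
        #       return event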

        with open("lambda_src/make_object_private.py", encoding="utf8") as fp:
            make_object_private_fn_handler_code = fp.read()

        remediate_object_acl_fn = _lambda.Function(
            self,
            id='remediateObjAclFn',
            function_name="remediate_object_acl_fn",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.InlineCode(make_object_private_fn_handler_code),
            handler='index.lambda_handler',
            timeout=core.Duration.seconds(10))

        # Let's add the necessary permission for the Lambda function
        remediate_object_acl_fn_perms = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            resources=[
                "arn:aws:s3:::*",
            ],
            actions=["s3:PutObjectAcl"])
        remediate_object_acl_fn_perms.sid = "PutObjectAcl"
        remediate_object_acl_fn.add_to_role_policy(
            remediate_object_acl_fn_perms)
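        # make_object_private.py, loaded above, is the remediation step. A hypothetical
        # sketch that resets the object ACL to private and reports the "$.status" flag
        # checked by the "Object Remediation Complete?" choice:
        #
        #   import boto3
        #
        #   def lambda_handler(event, context):
        #       params = event['detail']['requestParameters']
        #       try:
        #           boto3.client('s3').put_object_acl(
        #               ACL='private', Bucket=params['bucketName'], Key=params['key'])
        #           event['status'] = True
        #       except Exception as err:
        #           event['status'] = False
        #           event['sns_message'] = f"ACL remediation failed: {err}"
        #       return event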

        info_sec_ops_topic = _sns.Topic(self,
                                        "infoSecOpsTopicId",
                                        display_name="InfoSecTopic",
                                        topic_name="InfoSecOpsTopic")

        # Subscribe InfoSecOps Email to topic
        info_sec_ops_topic.add_subscription(
            _subs.EmailSubscription(global_args.INFO_SEC_OPS_EMAIL))

        # Grant Lambda permission to publish to topic
        # info_sec_ops_topic.grant_publish(lambda_notifier)

        # State Machine for notifying failed ACLs
        # Ref: https://docs.aws.amazon.com/cdk/api/latest/docs/aws-stepfunctions-readme.html
        ###############################################################################
        ################# STEP FUNCTIONS EXPERIMENTAL CODE - UNSTABLE #################
        ###############################################################################

        is_object_private_task = _sfn.Task(
            self,
            "isObjectPrivate?",
            task=_tasks.InvokeFunction(is_object_private_fn),
            result_path="$",
            output_path="$")

        remediate_object_acl_task = _sfn.Task(
            self,
            "RemediateObjectAcl",
            task=_tasks.InvokeFunction(remediate_object_acl_fn),
            result_path="$",
            output_path="$")

        notify_secops_task = _sfn.Task(
            self,
            "Notify InfoSecOps",
            task=_tasks.PublishToTopic(
                info_sec_ops_topic,
                integration_pattern=_sfn.ServiceIntegrationPattern.
                FIRE_AND_FORGET,
                message=_sfn.TaskInput.from_data_at("$.sns_message"),
                subject="Object Acl Remediation"))

        acl_remediation_failed_task = _sfn.Fail(self,
                                                "Acl Remediation Failed",
                                                cause="Acl Remediation Failed",
                                                error="Check Logs")

        acl_compliant_task = _sfn.Succeed(self,
                                          "Object Acl Compliant",
                                          comment="Object Acl is Compliant")

        remediate_object_acl_sfn_definition = is_object_private_task\
            .next(_sfn.Choice(self, "Is Object Private?")\
                .when(_sfn.Condition.boolean_equals("$.is_private", True), acl_compliant_task)\
                .when(_sfn.Condition.boolean_equals("$.is_private", False), remediate_object_acl_task\
                    .next(_sfn.Choice(self, "Object Remediation Complete?")\
                        .when(_sfn.Condition.boolean_equals("$.status", True),acl_compliant_task)\
                        .when(_sfn.Condition.boolean_equals("$.status", False), notify_secops_task.next(acl_remediation_failed_task))\
                        .otherwise(acl_remediation_failed_task)\
                        )
                    )
                .otherwise(acl_remediation_failed_task)
            )

        remediate_object_acl_statemachine = _sfn.StateMachine(
            self,
            "stateMachineId",
            definition=remediate_object_acl_sfn_definition,
            timeout=core.Duration.minutes(3))

        # Cloudwatch Event Triggers
        put_object_acl_event_targets = []
        """
        put_object_acl_event_targets.append(
            _targets.LambdaFunction( 
                handler=remediate_object_acl_fn
                )
            )
        """
        put_object_acl_event_targets.append(
            _targets.SfnStateMachine(
                machine=remediate_object_acl_statemachine))

        put_object_acl_event_pattern = _events.EventPattern(
            source=["aws.s3"],
            detail_type=["AWS API Call via CloudTrail"],
            detail={
                "eventSource": ["s3.amazonaws.com"],
                "eventName": ["PutObjectAcl", "PutObject"],
                "requestParameters": {
                    "bucketName": [f"{pvt_bkt.bucket_name}"]
                }
            })
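        # For reference, an abridged (illustrative) CloudTrail event that this pattern
        # would match looks roughly like:
        #
        #   {
        #       "source": "aws.s3",
        #       "detail-type": "AWS API Call via CloudTrail",
        #       "detail": {
        #           "eventSource": "s3.amazonaws.com",
        #           "eventName": "PutObjectAcl",
        #           "requestParameters": {
        #               "bucketName": "<monitored-bucket-name>",
        #               "key": "some/object.txt"
        #           }
        #       }
        #   }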

        put_object_acl_event_pattern_rule = _events.Rule(
            self,
            "putObjectAclEventId",
            event_pattern=put_object_acl_event_pattern,
            rule_name=f"put_s3_policy_event_{global_args.OWNER}",
            enabled=True,
            description="Trigger an event for S3 PutObjectAcl or PutObject",
            targets=put_object_acl_event_targets)

        ###########################################
        ################# OUTPUTS #################
        ###########################################

        output0 = core.CfnOutput(
            self,
            "SecurityAutomationFrom",
            value=f"{global_args.SOURCE_INFO}",
            description=
            "To know more about this automation stack, check out our github page."
        )

        output1 = core.CfnOutput(
            self,
            "MonitoredS3Bucket",
            value=(f"https://console.aws.amazon.com/s3/buckets/"
                   f"{pvt_bkt.bucket_name}"),
            description=f"S3 Bucket for testing purposes")

        output2 = core.CfnOutput(
            self,
            "Helpercommands",
            value=
            (f"aws s3api get-object-acl --bucket {pvt_bkt.bucket_name} --key OBJECT-KEY-NAME"
             ),
            description=
            f"Command to inspect the object ACL; update OBJECT-KEY-NAME to your needs"
        )
Example #12
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # A cache to temporarily hold session info
        session_cache_table = aws_dynamodb.Table(
            self,
            'session_cache_table',
            partition_key={
                'name': 'code',
                'type': aws_dynamodb.AttributeType.STRING
            },
            billing_mode=aws_dynamodb.BillingMode.PAY_PER_REQUEST,
            time_to_live_attribute='expires')

        #--
        #  Secrets
        #--------------------#

        # Twitter secrets are stored external to this stack
        twitter_secret = aws_secretsmanager.Secret.from_secret_attributes(
            self,
            'twitter_secret',
            secret_arn=os.environ['TWITTER_SECRET_ARN'])

        #--
        #  Layers
        #--------------------#

        # Each of these dependencies is used in 2 or more functions, so they are extracted into layers for ease of use
        twitter_layer = aws_lambda.LayerVersion(
            self,
            'twitter_layer',
            code=aws_lambda.AssetCode('layers/twitter_layer'),
            compatible_runtimes=[
                aws_lambda.Runtime.PYTHON_2_7, aws_lambda.Runtime.PYTHON_3_6
            ])

        boto_layer = aws_lambda.LayerVersion(
            self,
            'boto_layer',
            code=aws_lambda.AssetCode('layers/boto_layer'),
            compatible_runtimes=[aws_lambda.Runtime.PYTHON_3_6])

        #--
        #  Functions
        #--------------------#

        # Handles CRC validation requests from Twitter
        twitter_crc_func = aws_lambda.Function(
            self,
            "twitter_crc_func",
            code=aws_lambda.AssetCode('functions/twitter_crc_func'),
            handler="lambda.handler",
            layers=[twitter_layer],
            runtime=aws_lambda.Runtime.PYTHON_2_7,
            environment={'twitter_secret': twitter_secret.secret_arn})

        # Grant this function the ability to read Twitter credentials
        twitter_secret.grant_read(twitter_crc_func.role)

        # Handle schedule requests from Twitter
        twitter_webhook_func = aws_lambda.Function(
            self,
            "twitter_webhook_func",
            code=aws_lambda.AssetCode('functions/twitter_webhook_func'),
            handler="lambda.handler",
            layers=[boto_layer, twitter_layer],
            runtime=aws_lambda.Runtime.PYTHON_3_6,
            environment={'twitter_secret': twitter_secret.secret_arn})

        # Grant this function permission to read Twitter credentials
        twitter_secret.grant_read(twitter_webhook_func.role)

        # Grant this function permission to publish tweets to EventBridge
        twitter_webhook_func.add_to_role_policy(
            aws_iam.PolicyStatement(actions=["events:PutEvents"],
                                    resources=["*"]))

        # Use API Gateway as the webhook endpoint
        twitter_api = aws_apigateway.LambdaRestApi(
            self, 'twitter_api', handler=twitter_webhook_func, proxy=False)

        # Tweets are POSTed to the endpoint
        twitter_api.root.add_method('POST')

        # Handles twitter CRC validation requests via GET to the webhook
        twitter_api.root.add_method(
            'GET', aws_apigateway.LambdaIntegration(twitter_crc_func))

        # Extract relevant info from the tweet, including session codes
        parse_tweet_func = aws_lambda.Function(
            self,
            "parse_tweet_func",
            code=aws_lambda.AssetCode('functions/parse_tweet_func'),
            handler="lambda.handler",
            runtime=aws_lambda.Runtime.PYTHON_3_6)

        # Get session information for requested codes
        get_sessions_func = aws_lambda.Function(
            self,
            "get_sessions_func",
            code=aws_lambda.AssetCode('functions/get_sessions_func'),
            handler="lambda.handler",
            runtime=aws_lambda.Runtime.PYTHON_3_6,
            timeout=core.Duration.seconds(60),
            layers=[boto_layer],
            environment={
                'CACHE_TABLE': session_cache_table.table_name,
                'LOCAL_CACHE_TTL':
                str(1 * 60 * 60),  # Cache sessions locally for 1 hour
                'REMOTE_CACHE_TTL': str(12 * 60 * 60)
            })  # Cache sessions remotely for 12 hours

        # This function needs permissions to read and write to the table
        session_cache_table.grant_write_data(get_sessions_func)
        session_cache_table.grant_read_data(get_sessions_func)
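        # The CACHE_TABLE / REMOTE_CACHE_TTL settings and the table's "expires" TTL
        # attribute imply a read-through cache inside get_sessions_func. A purely
        # illustrative sketch (the real scraping lives in functions/get_sessions_func;
        # scrape_session below is a hypothetical helper):
        #
        #   import os
        #   import time
        #   import boto3
        #
        #   table = boto3.resource('dynamodb').Table(os.environ['CACHE_TABLE'])
        #
        #   def get_session(code):
        #       cached = table.get_item(Key={'code': code}).get('Item')
        #       if cached:
        #           return cached['session']
        #       session = scrape_session(code)
        #       table.put_item(Item={
        #           'code': code,
        #           'session': session,
        #           'expires': int(time.time()) + int(os.environ['REMOTE_CACHE_TTL'])})
        #       return session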

        # Create a schedule without conflicts
        create_schedule_func = aws_lambda.Function(
            self,
            "create_schedule_func",
            code=aws_lambda.AssetCode('functions/create_schedule_func'),
            handler="lambda.handler",
            runtime=aws_lambda.Runtime.PYTHON_3_6,
            timeout=core.Duration.seconds(60))

        # Tweet the response to the user
        tweet_schedule_func = aws_lambda.Function(
            self,
            "tweet_schedule_func",
            code=aws_lambda.AssetCode('functions/tweet_schedule_func'),
            handler="lambda.handler",
            layers=[boto_layer, twitter_layer],
            runtime=aws_lambda.Runtime.PYTHON_3_6,
            environment={'twitter_secret': twitter_secret.secret_arn})
        twitter_secret.grant_read(tweet_schedule_func.role)

        #--
        #  States
        #--------------------#

        # Step 4
        tweet_schedule_job = aws_stepfunctions.Task(
            self,
            'tweet_schedule_job',
            task=aws_stepfunctions_tasks.InvokeFunction(tweet_schedule_func))

        # Step 3
        create_schedule_job = aws_stepfunctions.Task(
            self,
            'create_schedule_job',
            task=aws_stepfunctions_tasks.InvokeFunction(create_schedule_func),
            input_path="$.sessions",
            result_path="$.schedule")
        create_schedule_job.next(tweet_schedule_job)

        # Step 2 - Get associated sessions (scrape or cache)
        get_sessions_job = aws_stepfunctions.Task(
            self,
            'get_sessions_job',
            task=aws_stepfunctions_tasks.InvokeFunction(get_sessions_func))

        # Prepare to get session info in parallel using the Map state
        get_sessions_map = aws_stepfunctions.Map(self,
                                                 'get_sessions_map',
                                                 items_path="$.codes",
                                                 result_path="$.sessions")
        get_sessions_map.iterator(get_sessions_job)
        get_sessions_map.next(create_schedule_job)

        # Shortcut if no session codes are supplied
        check_num_codes = aws_stepfunctions.Choice(self, 'check_num_codes')
        check_num_codes.when(
            aws_stepfunctions.Condition.number_greater_than('$.num_codes', 0),
            get_sessions_map)
        check_num_codes.otherwise(aws_stepfunctions.Succeed(self, "no_codes"))

        # Step 1 - Parse incoming tweet and prepare for scheduling
        parse_tweet_job = aws_stepfunctions.Task(
            self,
            'parse_tweet_job',
            task=aws_stepfunctions_tasks.InvokeFunction(parse_tweet_func))
        parse_tweet_job.next(check_num_codes)

        #--
        #  State Machines
        #--------------------#

        schedule_machine = aws_stepfunctions.StateMachine(
            self, "schedule_machine", definition=parse_tweet_job)

        # A rule to filter reInventSched tweet events
        reinvent_sched_rule = aws_events.Rule(
            self,
            "reinvent_sched_rule",
            event_pattern={"source": ["reInventSched"]})

        # Matching events start the schedule state machine
        reinvent_sched_rule.add_target(
            aws_events_targets.SfnStateMachine(
                schedule_machine,
                input=aws_events.RuleTargetInput.from_event_path("$.detail")))
Example #13
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        email_subscription_parameter = core.CfnParameter(
            self,
            "EmailSubscriptionParameter",
            description="Email Address for Notification Subscription")
        email_subscription = email_subscription_parameter.value_as_string

        ar1 = accessanalyzer.CfnAnalyzer.ArchiveRuleProperty(
            rule_name="test",
            filter=[
                accessanalyzer.CfnAnalyzer.FilterProperty(
                    property="principal.AWS", eq=["123456789123"])
            ])
        analyzer = accessanalyzer.CfnAnalyzer(
            self,
            id="accessanalyzer",
            type="ACCOUNT",
            tags=[core.CfnTag(key="AccessAnalyzerType", value="ACCOUNT")],
            archive_rules=[ar1])

        runtime = aws_lambda.Runtime.PYTHON_3_8

        boto3_lambda_layer = aws_lambda.LayerVersion(
            self,
            "Boto3LambdaLayer",
            code=aws_lambda.AssetCode("./layers/boto3"),
            compatible_runtimes=[runtime],
            description="Boto3 Lambda Layer")

        context_enrichment = aws_lambda.Function(
            self,
            "context_enrichment",
            runtime=runtime,
            handler="app.handler",
            code=aws_lambda.AssetCode("./functions/context-enrichment"),
            layers=[boto3_lambda_layer])
        handler_statement = iam.PolicyStatement(actions=[
            "iam:ListRoleTags", "s3:GetBucketTagging", "lambda:ListTags",
            "sqs:ListQueueTags", "kms:ListAliases", "kms:ListResourceTags"
        ],
                                                effect=iam.Effect.ALLOW,
                                                resources=["*"])
        context_enrichment.add_to_role_policy(handler_statement)

        cmk_key = kms.Key(
            self,
            "SNSEncryptionAtRestKey",
            description="SNS Encryption at rest key",
            alias="sns-encryption-at-rest",
            enable_key_rotation=True,
        )

        email_topic = sns.Topic(
            self,
            "AccessAnalyzerNotificationTopic",
            display_name="Access Analyzer Finding Notification Topic",
            master_key=cmk_key)
        email_topic.add_subscription(
            subscriptions.EmailSubscription(email_subscription))

        notification = aws_lambda.Function(
            self,
            "notification",
            runtime=runtime,
            handler="app.handler",
            code=aws_lambda.AssetCode("./functions/notification"),
            layers=[boto3_lambda_layer],
            environment={"SNS_TOPIC_ARN": email_topic.topic_arn})
        notification_statement = iam.PolicyStatement(actions=[
            "sns:Publish",
        ],
                                                     effect=iam.Effect.ALLOW,
                                                     resources=["*"])
        notification.add_to_role_policy(notification_statement)
        cmk_key.grant_encrypt_decrypt(notification)

        archive_access_analyzer_finding = aws_lambda.Function(
            self,
            "archive-access-analyzer-finding",
            runtime=runtime,
            handler="app.handler",
            code=aws_lambda.AssetCode(
                "./functions/archive-access-analyzer-finding"),
            layers=[boto3_lambda_layer])
        archive_statement = iam.PolicyStatement(actions=[
            "access-analyzer:UpdateFindings",
        ],
                                                effect=iam.Effect.ALLOW,
                                                resources=["*"])
        archive_access_analyzer_finding.add_to_role_policy(archive_statement)

        evaluate_access_analyzer_finding = aws_lambda.Function(
            self,
            "evaluate-access-analyzer-finding",
            runtime=runtime,
            handler="app.handler",
            code=aws_lambda.AssetCode(
                "./functions/evaluate-access-analyzer-finding"),
            layers=[boto3_lambda_layer])

        #https://docs.aws.amazon.com/cdk/api/latest/docs/aws-stepfunctions-readme.html
        access_analyzer_handler_task = sfn.Task(
            self,
            "Context Enrichment",
            task=sfn_tasks.InvokeFunction(context_enrichment),
            result_path="$.guid",
        )

        notification_task = sfn.Task(
            self,
            "Send Notification",
            task=sfn_tasks.InvokeFunction(notification),
            result_path="$.guid",
        )

        archive_task = sfn.Task(
            self,
            "Archive Finding",
            task=sfn_tasks.InvokeFunction(archive_access_analyzer_finding),
            result_path="$.guid",
        )

        evaluate_task = sfn.Task(
            self,
            "Evaluate Risk Level",
            task=sfn_tasks.InvokeFunction(evaluate_access_analyzer_finding),
            result_path="$.guid",
        )

        definition=access_analyzer_handler_task. \
          next(evaluate_task). \
          next(sfn.Choice(self, "Archive?"). \
            when(sfn.Condition.string_equals("$.guid.status", "ARCHIVE"), archive_task). \
            when(sfn.Condition.string_equals("$.guid.status", "NOTIFY"), notification_task) \
          )

        state_machine = sfn.StateMachine(
            self,
            "Access-Analyzer-Automatic-Finding-Archive-State-Machine",
            definition=definition,
            timeout=core.Duration.minutes(5),
        )

        #https://docs.aws.amazon.com/step-functions/latest/dg/tutorial-cloudwatch-events-s3.html
        access_analyzer_finding_rule = aws_events.Rule(
            self,
            "AccessAnalyzerFindingActiveEventRule",
            description="Access Analyzer Finding Event Active",
            enabled=True,
            event_pattern=aws_events.EventPattern(
                source=["aws.access-analyzer"],
                detail_type=["Access Analyzer Finding"],
                detail={"status": ["ACTIVE"]}),
            targets=[
                aws_events_targets.SfnStateMachine(state_machine),
                aws_events_targets.LambdaFunction(context_enrichment)
            ])
Example #14
0
    def __init__(self, scope: core.Construct, id: str, group_name: str,
                 minute_duration: int, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # TODO: Setup alerting of failure to an SNS
        # TODO: Failure is not the same as a student not in a group
        # TODO: Streamline input data so that lambdas only get the info they really need
        # TODO: Comment
        # TODO: Need to separate unexpected errors from regular errors
        # Setting up monitoring

        schedule_stop = lambda_.Function(
            self,
            id="ScheduleStopLambda",
            runtime=lambda_.Runtime.PYTHON_3_7,
            code=lambda_.Code.from_inline(
                open("./resources/schedule-termination.py", 'r').read()),
            handler="index.handler",
            log_retention=logs.RetentionDays.ONE_DAY,
            environment=dict(GROUP_NAME=group_name),
            timeout=core.Duration.seconds(30))
        schedule_stop.add_to_role_policy(
            statement=iam.PolicyStatement(actions=[
                "ec2:Describe*", "iam:ListGroupsForUser", "iam:ListUsers"
            ],
                                          effect=iam.Effect.ALLOW,
                                          resources=["*"]))
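        # ./resources/schedule-termination.py is inlined above. The later choice on
        # "$.Payload.Payload" suggests the handler returns a boolean: True when the
        # launching user belongs to GROUP_NAME (terminate after the wait), False
        # otherwise. A hypothetical sketch:
        #
        #   import os
        #   import boto3
        #
        #   def handler(event, context):
        #       # event is "$.detail.userIdentity" from the RunInstances CloudTrail event
        #       user_name = event.get('userName', '')
        #       groups = boto3.client('iam').list_groups_for_user(
        #           UserName=user_name)['Groups']
        #       return any(g['GroupName'] == os.environ['GROUP_NAME'] for g in groups)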

        terminate_ec2 = lambda_.Function(
            self,
            id="TerminateEC2",
            runtime=lambda_.Runtime.PYTHON_3_7,
            code=lambda_.Code.from_inline(
                open("./resources/terminate-ec2.py", 'r').read()),
            handler="index.handler",
            log_retention=logs.RetentionDays.ONE_DAY,
            timeout=core.Duration.seconds(30))
        terminate_ec2.add_to_role_policy(
            statement=iam.PolicyStatement(actions=[
                "ec2:DescribeInstance*",
                "ec2:TerminateInstances",
            ],
                                          effect=iam.Effect.ALLOW,
                                          resources=["*"]))
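        # terminate-ec2.py receives "$.detail.responseElements.instancesSet" and, per
        # the policy above, presumably just terminates the listed instances. A
        # hypothetical sketch:
        #
        #   import boto3
        #
        #   def handler(event, context):
        #       instance_ids = [item['instanceId'] for item in event['items']]
        #       boto3.client('ec2').terminate_instances(InstanceIds=instance_ids)
        #       return instance_ids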

        # The lambda object that will see if we should schedule.
        schedule_stop_task = tasks.LambdaInvoke(
            self,
            id='schedule stop',
            lambda_function=schedule_stop,
            input_path="$.detail.userIdentity",
            result_path="$.Payload",
        )
        # TODO: Need to change this based on the configuration info above
        # Wait state to try and delete
        # wait_x = sfn.Wait(self, 'Wait x minutes', time=sfn.WaitTime.seconds_path("10"))
        wait_x = sfn.Wait(self,
                          id='Wait x minutes',
                          time=sfn.WaitTime.duration(
                              core.Duration.minutes(minute_duration)))

        job_failed = sfn.Fail(self,
                              id="Failed Job",
                              cause="Error in the input",
                              error="Error")
        job_finished = sfn.Succeed(self, id="Job Finished")
        choice = sfn.Choice(self, 'Can I delete')
        choice.when(sfn.Condition.boolean_equals('$.Payload.Payload', False),
                    job_finished)
        choice.otherwise(wait_x)
        terminate_ec2_task = tasks.LambdaInvoke(
            self,
            'terminate',
            lambda_function=terminate_ec2,
            input_path="$.detail.responseElements.instancesSet")
        wait_x.next(terminate_ec2_task).next(job_finished)

        state_definition = schedule_stop_task \
            .next(choice)
        terminate_machine = sfn.StateMachine(self,
                                             id="State Machine",
                                             definition=state_definition)
        cloudwatch.Alarm(self,
                         "EC2ScheduleAlarm",
                         metric=terminate_machine.metric_failed(),
                         threshold=1,
                         evaluation_periods=1)
        # TODO Build Rule that monitors for EC2 creation
        # On any new creation, the EC2 will have to be destroyed. Including
        # other things?
        create_event = events.Rule(
            self,
            id='detect-ec2-start',
            description="Detects if an EC2 is created",
            enabled=True,
            event_pattern=events.EventPattern(
                detail_type=["AWS API Call via CloudTrail"],
                source=["aws.ec2"],
                detail={
                    "eventName": ["RunInstances"],
                    "eventSource": ["ec2.amazonaws.com"]
                }),
            targets=[targets.SfnStateMachine(terminate_machine)])
Example #15
0
    def __init__(self, app: core.App, stack_name: str, stack_env: str):
        super().__init__(scope=app, id=f"{stack_name}-{stack_env}")

        # CIDR
        cidr = "192.168.0.0/24"

        # === #
        # vpc #
        # === #
        vpc = aws_ec2.Vpc(
            self,
            id=f"{stack_name}-{stack_env}-vpc",
            cidr=cidr,
            subnet_configuration=[
                # Public Subnet
                aws_ec2.SubnetConfiguration(
                    cidr_mask=28,
                    name=f"{stack_name}-{stack_env}-public",
                    subnet_type=aws_ec2.SubnetType.PUBLIC,
                )
            ],
        )

        security_group = aws_ec2.SecurityGroup(
            self,
            id=f'security-group-for-{stack_name}-{stack_env}',
            vpc=vpc,
            security_group_name=f'security-group-for-{stack_name}-{stack_env}',
            allow_all_outbound=True)

        batch_role = aws_iam.Role(
            scope=self,
            id=f"batch_role_for_{stack_name}-{stack_env}",
            role_name=f"batch_role_for_{stack_name}-{stack_env}",
            assumed_by=aws_iam.ServicePrincipal("batch.amazonaws.com"))

        batch_role.add_managed_policy(
            aws_iam.ManagedPolicy.from_managed_policy_arn(
                scope=self,
                id=f"AWSBatchServiceRole-{stack_env}",
                managed_policy_arn=
                "arn:aws:iam::aws:policy/service-role/AWSBatchServiceRole"))

        batch_role.add_to_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=["arn:aws:logs:*:*:*"],
                                    actions=[
                                        "logs:CreateLogGroup",
                                        "logs:CreateLogStream",
                                        "logs:PutLogEvents",
                                        "logs:DescribeLogStreams"
                                    ]))

        # Role to attach EC2
        instance_role = aws_iam.Role(
            scope=self,
            id=f"instance_role_for_{stack_name}-{stack_env}",
            role_name=f"instance_role_for_{stack_name}-{stack_env}",
            assumed_by=aws_iam.ServicePrincipal("ec2.amazonaws.com"))

        instance_role.add_managed_policy(
            aws_iam.ManagedPolicy.from_managed_policy_arn(
                scope=self,
                id=f"AmazonEC2ContainerServiceforEC2Role-{stack_env}",
                managed_policy_arn=
                "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role"
            ))

        # add policy to access S3
        instance_role.add_to_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=["*"],
                                    actions=["s3:*"]))

        # add policy to access CloudWatch Logs
        instance_role.add_to_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=["arn:aws:logs:*:*:*"],
                                    actions=[
                                        "logs:CreateLogGroup",
                                        "logs:CreateLogStream",
                                        "logs:PutLogEvents",
                                        "logs:DescribeLogStreams"
                                    ]))

        # attach role to EC2
        instance_profile = aws_iam.CfnInstanceProfile(
            scope=self,
            id=f"instance_profile_for_{stack_name}-{stack_env}",
            instance_profile_name=
            f"instance_profile_for_{stack_name}-{stack_env}",
            roles=[instance_role.role_name])

        # ===== #
        # batch #
        # ===== #
        batch_compute_resources = aws_batch.ComputeResources(
            vpc=vpc,
            maxv_cpus=4,
            minv_cpus=0,
            security_groups=[security_group],
            instance_role=instance_profile.attr_arn,
            type=aws_batch.ComputeResourceType.SPOT)

        batch_compute_environment = aws_batch.ComputeEnvironment(
            scope=self,
            id=f"ProjectEnvironment-{stack_env}",
            compute_environment_name=f"ProjectEnvironmentBatch-{stack_env}",
            compute_resources=batch_compute_resources,
            service_role=batch_role)

        job_role = aws_iam.Role(
            scope=self,
            id=f"job_role_{stack_name}-{stack_env}",
            role_name=f"job_role_{stack_name}-{stack_env}",
            assumed_by=aws_iam.ServicePrincipal("ecs-tasks.amazonaws.com"))

        job_role.add_managed_policy(
            aws_iam.ManagedPolicy.from_managed_policy_arn(
                scope=self,
                id=f"AmazonECSTaskExecutionRolePolicy_{stack_name}-{stack_env}",
                managed_policy_arn=
                "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy"
            ))

        job_role.add_managed_policy(
            aws_iam.ManagedPolicy.from_managed_policy_arn(
                scope=self,
                id=f"AmazonS3FullAccess_{stack_name}-{stack_env}",
                managed_policy_arn="arn:aws:iam::aws:policy/AmazonS3FullAccess"
            ))

        job_role.add_managed_policy(
            aws_iam.ManagedPolicy.from_managed_policy_arn(
                scope=self,
                id=f"CloudWatchLogsFullAccess_{stack_name}-{stack_env}",
                managed_policy_arn=
                "arn:aws:iam::aws:policy/CloudWatchLogsFullAccess"))

        batch_job_queue = aws_batch.JobQueue(
            scope=self,
            id=f"job_queue_for_{stack_name}-{stack_env}",
            job_queue_name=f"job_queue_for_{stack_name}-{stack_env}",
            compute_environments=[
                aws_batch.JobQueueComputeEnvironment(
                    compute_environment=batch_compute_environment, order=1)
            ],
            priority=1)

        # ECR repository
        ecr_repository = aws_ecr_assets.DockerImageAsset(
            scope=self,
            id=f"ecr_image_{stack_env}",
            directory="./docker",
            repository_name=f"repository_for_{stack_env}")

        # get image from ECR
        container_image = aws_ecs.ContainerImage.from_ecr_repository(
            repository=ecr_repository.repository)

        # job define
        # pass `S3_BUCKET` as environment argument.
        batch_job_definition = aws_batch.JobDefinition(
            scope=self,
            id=f"job_definition_for_{stack_env}",
            job_definition_name=f"job_definition_for_{stack_env}",
            container=aws_batch.JobDefinitionContainer(
                image=container_image,
                environment={"S3_BUCKET": f"{S3_BUCKET}"},
                job_role=job_role,
                vcpus=1,
                memory_limit_mib=1024))

        # ============= #
        # StepFunctions #
        # ============= #
        # Ref::{keyword} can be replaced with StepFunction input
        command_overrides = ["python", "__init__.py", "--time", "Ref::time"]

        batch_task = aws_sfn_tasks.BatchSubmitJob(
            scope=self,
            id=f"batch_job_{stack_env}",
            job_definition=batch_job_definition,
            job_name=f"batch_job_{stack_env}_today",
            job_queue=batch_job_queue,
            container_overrides=aws_sfn_tasks.BatchContainerOverrides(
                command=command_overrides),
            payload=aws_sfn.TaskInput.from_object({"time.$": "$.time"}))
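        # Note: because the payload maps {"time.$": "$.time"} onto the job parameters,
        # an execution started with input {"time": "2021-01-01"} resolves the
        # "Ref::time" placeholder, so the container command becomes:
        #
        #   ["python", "__init__.py", "--time", "2021-01-01"]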

        # `one step` for StepFunctions
        definition = batch_task

        sfn_daily_process = aws_sfn.StateMachine(
            scope=self,
            id=f"YourProjectSFn-{stack_env}",
            definition=definition)

        # ================ #
        # CloudWatch Event #
        # ================ #

        # Run every day at 21:30 JST
        # See https://docs.aws.amazon.com/lambda/latest/dg/tutorial-scheduled-events-schedule-expressions.html
        events_daily_process = aws_events.Rule(
            scope=self,
            id=f"DailySFnProcess-{stack_env}",
            schedule=aws_events.Schedule.cron(minute="30",
                                              hour="12",
                                              month='*',
                                              day="*",
                                              year='*'),
        )
        events_daily_process.add_target(
            aws_events_targets.SfnStateMachine(sfn_daily_process))