Example #1
    def __init__(self, scope: core.Construct, id: str, *,
                 profile_name: str,
                 namespace: str = 'default',
                 vpc: Optional[ec2.Vpc] = None,
                 artifacts_bucket: Optional[s3.Bucket] = None,
                 artifacts_path: Optional[str] = None,
                 logs_bucket: Optional[s3.Bucket] = None,
                 logs_path: Optional[str] = 'elasticmapreduce/',
                 mutable_instance_role: bool = True,
                 mutable_security_groups: bool = True,
                 description: Optional[str] = None) -> None:
        super().__init__(scope, id)

        if not profile_name:
            return

        self._profile_name = profile_name
        self._namespace = namespace
        self._mutable_instance_role = mutable_instance_role
        self._mutable_security_groups = mutable_security_groups
        self._vpc = vpc
        self._security_groups = EMRSecurityGroups(self, 'SecurityGroups', vpc=vpc)
        self._roles = EMRRoles(
            self, 'Roles',
            role_name_prefix=f'{namespace}_{profile_name}',
            artifacts_bucket=artifacts_bucket,
            artifacts_path=artifacts_path,
            logs_bucket=logs_bucket,
            logs_path=logs_path)
        self._artifacts_bucket = artifacts_bucket
        self._artifacts_path = artifacts_path
        self._logs_bucket = logs_bucket
        self._logs_path = logs_path
        self._description = description

        self._s3_encryption_configuration = {
            'EncryptionMode': S3EncryptionMode.SSE_S3.value
        }
        self._local_disk_encryption_configuration = None
        self._tls_certificate_configuration = None
        self._kerberos_configuration = None
        self._kerberos_attributes_secret = None
        self._emrfs_configuration = None
        self._lake_formation_configuration = None

        self._security_configuration = None
        self._security_configuration_name = None

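        # Store the serialized profile as JSON in Parameter Store under the namespace and profile name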
        self._ssm_parameter = ssm.CfnParameter(
            self, 'SSMParameter',
            type='String',
            value=json.dumps(self.to_json()),
            tier='Intelligent-Tiering',
            name=f'{SSM_PARAMETER_PREFIX}/{namespace}/{profile_name}')

        self._construct_security_configuration()

        self._rehydrated = False
Example #2
    def __init__(self, scope: core.Construct, id: str, stage: Optional[str] = 'prod', **kwargs) -> None:
        super().__init__(scope, id + '-' + stage, **kwargs)

        func_name = id + '-' + stage
        handler = lambda_.Function(self, func_name, code=lambda_.Code.from_asset('identity_checking_service'),
                                   runtime=lambda_.Runtime.NODEJS_10_X, handler='main.handler')

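        # Record the service endpoint (the Lambda function ARN) in an SSM parameter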
        ssm.CfnParameter(self, id='ServiceEndpointSSM', name='ServiceEndpointSSM',
                         type='String', value=handler.function_arn)
        self._identity_checking_service_arn = handler.function_arn
Example #3
  def __init__(self, scope: core.Construct, id: str, landing_zone: LandingZone, peers: List[LandingZone], amazon_asn: int, **kwargs):
    """
    Configure the Transit Gateways
    """
    super().__init__(scope,id, **kwargs)
    self.landing_zone = landing_zone
    self.peers = peers
    
    self.gateway = ec2.CfnTransitGateway(self,'TransitGateway',
      amazon_side_asn=amazon_asn,
      auto_accept_shared_attachments='enable',
      default_route_table_association='enable',
      default_route_table_propagation='enable',
      description='HomeNet TransitGateway',
      dns_support='enable',
      vpn_ecmp_support='enable',
      tags=[
        core.CfnTag(key='Name',value='HomeNet/TGW')
      ])

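    # Collect CIDR entries for every peer landing zone except the local one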
    entries = []
    for peer in peers:
      if peer == landing_zone:
        continue
      entries.append(ec2.CfnPrefixList.EntryProperty(cidr=peer.cidr_block,description=peer.zone_name))

    ec2.CfnPrefixList(self,'PeerPrefix',
      address_family='IPv4',
      entries=entries,
      max_entries=100,
      prefix_list_name='nbachmei.homenet.tgw-peers',
      tags=[core.CfnTag(key='Name',value='HomeNet TGW Prefixes')])

    ec2.CfnTransitGatewayAttachment(self,'VpcAttachment',
      subnet_ids=landing_zone.vpc.select_subnets(subnet_group_name='TGW').subnet_ids,
      transit_gateway_id=self.gateway.ref,
      vpc_id=landing_zone.vpc.vpc_id,
      tags=[core.CfnTag(key='Name',value='HomeNet')])

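    # Record the regional Transit Gateway id in Parameter Store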
    ssm.CfnParameter(self,'RegionalGatewayParameter',
      name='/homenet/{}/transit-gateway/gateway-id'.format(landing_zone.region),
      value=self.gateway.ref,
      type='String')

    self.__add_peers()
Example #4
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        *,
        configuration_name: str,
        namespace: str = 'default',
        release_label: Optional[str] = 'emr-5.29.0',
        applications: Optional[List[str]] = None,
        bootstrap_actions: Optional[List[emr_code.EMRBootstrapAction]] = None,
        configurations: Optional[List[dict]] = None,
        use_glue_catalog: Optional[bool] = True,
        step_concurrency_level: Optional[int] = 1,
        description: Optional[str] = None,
        secret_configurations: Optional[Dict[str, secretsmanager.Secret]] = None):

        super().__init__(scope, id)

        self._override_interfaces = {}

        if configuration_name is None:
            return

        self._configuration_name = configuration_name
        self._namespace = namespace
        self._description = description
        self._bootstrap_actions = bootstrap_actions
        self._secret_configurations = secret_configurations
        self._spark_packages = []
        self._spark_jars = []

        if bootstrap_actions:
            # Create a nested Construct to avoid Construct id collisions
            construct = core.Construct(self, 'BootstrapActions')
            resolved_bootstrap_actions = [
                b.resolve(construct) for b in bootstrap_actions
            ]
        else:
            resolved_bootstrap_actions = []

        self._config = {
            'AdditionalInfo': None,
            'AmiVersion': None,
            'Applications': self._get_applications(applications),
            'AutoScalingRole': None,
            'BootstrapActions': resolved_bootstrap_actions,
            'Configurations': self._get_configurations(configurations, use_glue_catalog),
            'CustomAmiId': None,
            'EbsRootVolumeSize': None,
            'Instances': {
                'AdditionalMasterSecurityGroups': None,
                'AdditionalSlaveSecurityGroups': None,
                'Ec2KeyName': None,
                'Ec2SubnetId': None,
                'Ec2SubnetIds': None,
                'EmrManagedMasterSecurityGroup': None,
                'EmrManagedSlaveSecurityGroup': None,
                'HadoopVersion': None,
                'InstanceCount': None,
                'InstanceFleets': None,
                'InstanceGroups': None,
                'KeepJobFlowAliveWhenNoSteps': True,
                'MasterInstanceType': None,
                'Placement': None,
                'ServiceAccessSecurityGroup': None,
                'SlaveInstanceType': None,
                'TerminationProtected': False,
            },
            'JobFlowRole': None,
            'KerberosAttributes': None,
            'LogUri': None,
            'Name': configuration_name,
            'NewSupportedProducts': None,
            'ReleaseLabel': release_label,
            'RepoUpgradeOnBoot': None,
            'ScaleDownBehavior': None,
            'SecurityConfiguration': None,
            'ServiceRole': None,
            'StepConcurrencyLevel': step_concurrency_level,
            'SupportedProducts': None,
            'Tags': [],
            'VisibleToAllUsers': True,
        }

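        # Track the S3 locations of bootstrap action artifacts so read access can be granted later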
        self._configuration_artifacts = []
        if bootstrap_actions is not None:
            for bootstrap_action in bootstrap_actions:
                if bootstrap_action.code is not None:
                    self._configuration_artifacts.append({
                        'Bucket': bootstrap_action.code.deployment_bucket.bucket_name,
                        'Path': os.path.join(bootstrap_action.code.deployment_prefix, '*'),
                    })

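        # Store the serialized cluster configuration as JSON in Parameter Store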
        self._ssm_parameter = ssm.CfnParameter(
            self,
            'SSMParameter',
            type='String',
            value=json.dumps(self.to_json()),
            tier='Intelligent-Tiering',
            name=f'{SSM_PARAMETER_PREFIX}/{namespace}/{configuration_name}')

        self.override_interfaces['default'] = {
            'ClusterName': {
                'JsonPath': 'Name',
                'Default': configuration_name
            },
            'ReleaseLabel': {
                'JsonPath': 'ReleaseLabel',
                'Default': release_label
            },
            'StepConcurrencyLevel': {
                'JsonPath': 'StepConcurrencyLevel',
                'Default': step_concurrency_level
            }
        }

        self._rehydrated = False
Example #5
    def __init__(
            self,
            scope: core.Construct,
            id: str,
            *,
            launch_function_name: str,
            emr_profile: emr_profile.EMRProfile,
            cluster_configuration: cluster_configuration.ClusterConfiguration,
            cluster_name: str = None,
            namespace: str = 'default',
            default_fail_if_cluster_running: bool = False,
            success_topic: Optional[sns.Topic] = None,
            failure_topic: Optional[sns.Topic] = None,
            override_cluster_configs_lambda: Optional[aws_lambda.Function] = None,
            allowed_cluster_config_overrides: Optional[Dict[str, Dict[str, str]]] = None,
            description: Optional[str] = None,
            cluster_tags: Union[List[core.Tag], Dict[str, str], None] = None,
            wait_for_cluster_start: bool = True) -> None:
        super().__init__(scope, id)

        if launch_function_name is None:
            return

        self._launch_function_name = launch_function_name
        self._namespace = namespace
        self._emr_profile = emr_profile
        self._cluster_configuration = cluster_configuration
        self._cluster_name = cluster_name
        self._default_fail_if_cluster_running = default_fail_if_cluster_running
        self._success_topic = success_topic
        self._failure_topic = failure_topic
        self._override_cluster_configs_lambda = override_cluster_configs_lambda
        self._description = description
        self._wait_for_cluster_start = wait_for_cluster_start

        if allowed_cluster_config_overrides is None:
            self._allowed_cluster_config_overrides = cluster_configuration.override_interfaces.get(
                'default', None)
        else:
            self._allowed_cluster_config_overrides = allowed_cluster_config_overrides

        if isinstance(cluster_tags, dict):
            self._cluster_tags = [
                core.Tag(k, v) for k, v in cluster_tags.items()
            ]
        elif isinstance(cluster_tags, list):
            self._cluster_tags = cluster_tags
        else:
            self._cluster_tags = []

        self._cluster_tags.extend([
            core.Tag('deployment:product:name', __product__),
            core.Tag('deployment:product:version', __version__)
        ])

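        # If the instance role is mutable, grant it read access to each configuration artifact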
        if len(cluster_configuration.configuration_artifacts) > 0:
            if emr_profile.mutable_instance_role:
                for i in range(len(cluster_configuration.configuration_artifacts)):
                    configuration_artifact = cluster_configuration.configuration_artifacts[i]
                    bucket_name = configuration_artifact['Bucket']
                    path = configuration_artifact['Path']
                    bucket = s3.Bucket.from_bucket_name(self, f'Bucket_{i}', bucket_name)
                    bucket.grant_read(emr_profile.roles.instance_role, path)
            else:
                logger.warning('--------------------------------------------------------------------------')
                logger.warning('Unable to authorize the artifacts in the ClusterConfiguration')
                logger.warning(f'The EMRProfile {emr_profile.profile_name} has an immutable Instance Role')
                logger.warning('Use of these artifacts will require direct authorization on the EMRProfile')
                logger.warning('--------------------------------------------------------------------------')

        fail = emr_chains.Fail(
            self,
            'FailChain',
            message=sfn.TaskInput.from_data_at('$.Error'),
            subject='EMR Launch Function Failure',
            topic=failure_topic,
            error='Failed to Launch Cluster',
            cause='See Execution Event "FailStateEntered" for complete error cause')

        # Create Task for loading the cluster configuration from Parameter Store
        load_cluster_configuration = emr_tasks.LoadClusterConfigurationBuilder.build(
            self,
            'LoadClusterConfigurationTask',
            cluster_name=cluster_name,
            cluster_tags=self._cluster_tags,
            profile_namespace=emr_profile.namespace,
            profile_name=emr_profile.profile_name,
            configuration_namespace=cluster_configuration.namespace,
            configuration_name=cluster_configuration.configuration_name,
            result_path='$.ClusterConfiguration',
        )
        load_cluster_configuration.add_catch(fail,
                                             errors=['States.ALL'],
                                             result_path='$.Error')

        # Create Task for overriding cluster configurations
        override_cluster_configs = emr_tasks.OverrideClusterConfigsBuilder.build(
            self,
            'OverrideClusterConfigsTask',
            override_cluster_configs_lambda=override_cluster_configs_lambda,
            allowed_cluster_config_overrides=self._allowed_cluster_config_overrides,
            input_path='$.ClusterConfiguration.Cluster',
            result_path='$.ClusterConfiguration.Cluster',
        )
        # Attach an error catch to the Task
        override_cluster_configs.add_catch(fail,
                                           errors=['States.ALL'],
                                           result_path='$.Error')

        # Create Task to conditionally fail if a cluster with this name is already
        # running, based on user input
        fail_if_cluster_running = emr_tasks.FailIfClusterRunningBuilder.build(
            self,
            'FailIfClusterRunningTask',
            default_fail_if_cluster_running=default_fail_if_cluster_running,
            input_path='$.ClusterConfiguration.Cluster',
            result_path='$.ClusterConfiguration.Cluster',
        )
        # Attach an error catch to the task
        fail_if_cluster_running.add_catch(fail,
                                          errors=['States.ALL'],
                                          result_path='$.Error')

        # Create a Task for updating the cluster tags at runtime
        update_cluster_tags = emr_tasks.UpdateClusterTagsBuilder.build(
            self,
            'UpdateClusterTagsTask',
            input_path='$.ClusterConfiguration.Cluster',
            result_path='$.ClusterConfiguration.Cluster',
        )
        # Attach an error catch to the Task
        update_cluster_tags.add_catch(fail,
                                      errors=['States.ALL'],
                                      result_path='$.Error')

        # Create a Task to create the cluster
        if cluster_configuration.secret_configurations is None and emr_profile.kerberos_attributes_secret is None:
            # Use the standard Step Functions/EMR integration to create the cluster
            create_cluster = emr_tasks.CreateClusterBuilder.build(
                self,
                'CreateClusterTask',
                roles=emr_profile.roles,
                input_path='$.ClusterConfiguration.Cluster',
                result_path='$.LaunchClusterResult',
                wait_for_cluster_start=wait_for_cluster_start,
            )
        else:
            # Use the RunJobFlow Lambda to create the cluster to avoid exposing the
            # SecretConfigurations and KerberosAttributes values
            create_cluster = emr_tasks.RunJobFlowBuilder.build(
                self,
                'CreateClusterTask',
                roles=emr_profile.roles,
                kerberos_attributes_secret=emr_profile.kerberos_attributes_secret,
                secret_configurations=cluster_configuration.secret_configurations,
                input_path='$.ClusterConfiguration',
                result_path='$.LaunchClusterResult',
                wait_for_cluster_start=wait_for_cluster_start,
            )

        # Attach an error catch to the Task
        create_cluster.add_catch(fail,
                                 errors=['States.ALL'],
                                 result_path='$.Error')

        success = emr_chains.Success(
            self,
            'SuccessChain',
            message=sfn.TaskInput.from_data_at('$.LaunchClusterResult'),
            subject='Launch EMR Config Succeeded',
            topic=success_topic,
            output_path='$')

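        # Chain the tasks: load config -> apply overrides -> fail-if-running guard -> update tags -> create cluster -> notify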
        definition = sfn.Chain \
            .start(load_cluster_configuration) \
            .next(override_cluster_configs) \
            .next(fail_if_cluster_running) \
            .next(update_cluster_tags) \
            .next(create_cluster) \
            .next(success)

        self._state_machine = sfn.StateMachine(
            self,
            'StateMachine',
            state_machine_name=f'{namespace}_{launch_function_name}',
            definition=definition)

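        # Store the serialized launch function definition as JSON in Parameter Store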
        self._ssm_parameter = ssm.CfnParameter(
            self,
            'SSMParameter',
            type='String',
            value=json.dumps(self.to_json()),
            tier='Intelligent-Tiering',
            name=f'{SSM_PARAMETER_PREFIX}/{namespace}/{launch_function_name}')
Example #6
 def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
     super().__init__(scope, id, **kwargs)
     
     # Parameters
     CognitoPoolId = core.CfnParameter(self,"CognitoPoolId",
         type = "String",
         default = "CognitoPoolId"
     )
     CognitoClientId = core.CfnParameter(self,"CognitoClientId",
         type = "String",
         default = "CognitoClientId"
     )
     CognitoClientSecret = core.CfnParameter(self,"CognitoClientSecret",
         type = "String",
         default = "CognitoClientSecret"
     )
     CognitoDomain = core.CfnParameter(self,"CognitoDomain",
         type = "String",
         default = "CognitoDomain"
     )
     BaseUrl = core.CfnParameter(self,"BaseUrl",
         type = "String",
         default = "BaseUrl"
     )
     MyDBEndpoint = core.CfnParameter(self,"MyDBEndpoint",
         type = "String",
         default = "MyDBEndpoint"
     )
     ImageS3Bucket = core.CfnParameter(self,"ImageS3Bucket",
         type = "String",
         default = "ImageS3Bucket"
     )
     DBPassword_parameters = core.CfnParameter(self, "DBPassword",
         no_echo = True,
         description = "RDS Password.",
         min_length = 1,
         max_length = 41,
         constraint_description = "the password must be between 1 and 41 characters",
         default = "default"
     )
     
     # CognitoPoolIdParameter
     CognitoPoolIdParameter = ssm.CfnParameter(self,"CognitoPoolIdParameter",
         name ="edx-COGNITO_POOL_ID",
         type = "String",
         value = CognitoPoolId.value_as_string
     )
     # CognitoClientIdParameter
     CognitoClientIdParameter = ssm.CfnParameter(self,"CognitoClientIdParameter",
         name ="edx-COGNITO_CLIENT_ID",
         type = "String",
         value = CognitoClientId.value_as_string
     )
     # CognitoClientSecretParameter
     CognitoClientSecretParameter = ssm.CfnParameter(self,"CognitoClientSecretParameter",
         name ="edx-COGNITO_CLIENT_SECRET",
         type = "String",
         value = CognitoClientSecret.value_as_string
     )
     # CognitoDomainParameter
     CognitoDomainParameter = ssm.CfnParameter(self,"CognitoDomainParameter",
         name ="edx-COGNITO_DOMAIN",
         type = "String",
         value = CognitoDomain.value_as_string
     )
     # BaseUrlParameter
     BaseUrlParameter = ssm.CfnParameter(self,"BaseUrlParameter",
         name ="edx-BASE_URL",
         type = "String",
         value = BaseUrl.value_as_string
     )
     # DBHostParameter
     DBHostParameter = ssm.CfnParameter(self,"DBHostParameter",
         name ="edx-DATABASE_HOST",
         type = "String",
         value = MyDBEndpoint.value_as_string
     )
     # DBUserParameter
     DBUserParameter = ssm.CfnParameter(self,"DBUserParameter",
         name ="edx-DATABASE_USER",
         type = "String",
         value = "web_user"
     )
     # DBPasswordParameter
     DBPasswordParameter = ssm.CfnParameter(self,"DBPasswordParameter",
         name ="edx-DATABASE_PASSWORD",
         type = "String",
         value = DBPassword_parameters.value_as_string
     )
     # DBNameParameter
     DBNameParameter = ssm.CfnParameter(self,"DBNameParameter",
         name ="edx-DATABASE_DB_NAME",
         type = "String",
         value = "Photos"
     )
     # FlaskSecretParameter
     FlaskSecretParameter = ssm.CfnParameter(self,"FlaskSecretParameter",
         name ="edx-FLASK_SECRET",
         type = "String",
         value = "secret"
     )
     # PhotosBucketParameter
     PhotosBucketParameter = ssm.CfnParameter(self,"PhotosBucketParameter",
         name ="edx-PHOTOS_BUCKET",
         type = "String",
         value = ImageS3Bucket.value_as_string
     )
     
Example #7
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 id_checker: str,
                 event_bus: str,
                 stage: Optional[str] = 'prod',
                 **kwargs) -> None:
        super().__init__(scope, id + '-' + stage, **kwargs)

        app_table_name = id + '-applications-table-' + stage
        app_table = ddb.Table(self,
                              id=app_table_name,
                              table_name=app_table_name,
                              partition_key=ddb.Attribute(
                                  name='id', type=ddb.AttributeType.STRING),
                              billing_mode=ddb.BillingMode.PAY_PER_REQUEST)

        events_table_name = id + '-events-table-' + stage
        events_table = ddb.Table(self,
                                 id=events_table_name,
                                 table_name=events_table_name,
                                 partition_key=ddb.Attribute(
                                     name='id', type=ddb.AttributeType.STRING),
                                 billing_mode=ddb.BillingMode.PAY_PER_REQUEST,
                                 stream=ddb.StreamViewType.NEW_IMAGE)

        self._table_stream_arn = events_table.table_stream_arn

        # create our Lambda function for the bank account service
        func_name = id + '-' + stage + '-' + 'account-application'
        lambda_assets = lambda_.Code.from_asset('account_application_service')
        handler = lambda_.Function(
            self,
            func_name,
            code=lambda_assets,
            runtime=lambda_.Runtime.NODEJS_10_X,
            handler='main.handler',
            environment={
                'ACCOUNTS_TABLE_NAME': app_table.table_name,
                'EVENTS_TABLE_NAME': events_table.table_name,
                'REGION': core.Aws.REGION
            })

        gw.LambdaRestApi(self, id=stage + '-' + id, handler=handler)

        # grant main Lambda function access to DynamoDB tables
        app_table.grant_read_write_data(handler.role)
        events_table.grant_read_write_data(handler.role)

        p_statement = iam.PolicyStatement(actions=[
            'ssm:Describe*', 'ssm:Get*', 'ssm:List*', 'events:*', 'states:*'
        ],
                                          effect=iam.Effect.ALLOW,
                                          resources=['*'])
        handler.add_to_role_policy(statement=p_statement)

        # create the Lambda function for the event publisher
        evt_publisher = id + '-' + stage + '-' + 'event-publisher'
        evt_handler = lambda_.Function(
            self,
            evt_publisher,
            code=lambda_assets,
            runtime=lambda_.Runtime.NODEJS_10_X,
            handler='event-publisher.handler',
            events=[
                lambda_es.DynamoEventSource(
                    table=events_table,
                    starting_position=lambda_.StartingPosition.LATEST)
            ],
            environment={
                'EVENT_BRIDGE_ARN': event_bus,
                'REGION': core.Aws.REGION
            })

        evt_handler.add_to_role_policy(statement=p_statement)

        # set up StepFunctions
        approve_application = sf.Task(
            self,
            'Approve Application',
            task=sft.InvokeFunction(
                handler,
                payload={
                    'body': {
                        'command': 'APPROVE_ACCOUNT_APPLICATION',
                        'data': {
                            'id.$': '$.application.id'
                        }
                    }
                }),
            result_path='$.approveApplication')

        reject_application = sf.Task(
            self,
            'Reject Application',
            task=sft.InvokeFunction(
                handler,
                payload={
                    'body': {
                        'command': 'REJECT_ACCOUNT_APPLICATION',
                        'data': {
                            'id.$': '$.application.id'
                        }
                    }
                }),
            result_path='$.rejectApplication')

        id_checker_handler = lambda_.Function.from_function_arn(
            self, 'IdentityChecker', function_arn=id_checker)
        check_identity = sf.Task(
            self,
            'Check Identity',
            task=sft.InvokeFunction(
                id_checker_handler,
                payload={
                    'body': {
                        'command': 'CHECK_IDENTITY',
                        'data': {
                            'application.$': '$.application'
                        }
                    }
                }))

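        # Pause the workflow with a task token until a human review decision is returned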
        wait_for_human_review = sf.Task(
            self,
            'Wait for Human Review',
            task=sft.RunLambdaTask(
                handler,
                integration_pattern=sf.ServiceIntegrationPattern.WAIT_FOR_TASK_TOKEN,
                payload={
                    'body': {
                        'command': 'FLAG_ACCOUNT_APPLICATION_FOR_HUMAN_REVIEW',
                        'data': {
                            'id.$': '$.application.id',
                            'taskToken': sf.Context.task_token
                        }
                    }
                }),
            result_path='$.humanReview') \
            .next(
                sf.Choice(self, 'Human Approval Choice')
                .when(sf.Condition.string_equals('$.humanReview.decision', 'APPROVE'), next=approve_application)
                .when(sf.Condition.string_equals('$.humanReview.decision', 'REJECT'), next=reject_application))

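        # Run the identity and fraud checks in parallel, then route based on the flagged results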
        sm_definition = sf.Parallel(self, 'Perform Automated Checks', result_path='$.checks') \
            .branch(check_identity) \
            .branch(sf.Pass(self, 'Check Fraud Model', result=sf.Result({'flagged': False}))) \
            .next(
            sf.Choice(self, 'Automated Checks Choice')
                .when(sf.Condition.boolean_equals('$.checks[0].flagged', True), next=wait_for_human_review)
                .when(sf.Condition.boolean_equals('$.checks[1].flagged', True), next=wait_for_human_review)
                .otherwise(approve_application))

        state_machine = sf.StateMachine(self,
                                        'OpenAccountStateMachine' + stage,
                                        definition=sm_definition)
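        # Publish the state machine ARN as an SSM parameter for downstream lookup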
        ssm.CfnParameter(self,
                         id='StateMachineArnSSM',
                         type='String',
                         value=state_machine.state_machine_arn,
                         name='StateMachineArnSSM')