def createWithdrawWorkflow(self):
    """Build the withdraw state machine around a worker-polled 'payInvoice' activity.

    Success and failure each invoke a notification Lambda and then end in a
    terminal Succeed/Fail state.

    Returns:
        sfn.StateMachine: the 'withdrawWorkflow' state machine.
    """
    # Failure branch: notify, then terminate in a Fail state.
    # NOTE(review): terminal states here are named 'tipErrorState' /
    # 'tipSuccessState' while createTipWorkflow uses 'withdraw*' names — the
    # two workflows' terminal-state ids look swapped; confirm intent before
    # renaming (ids share the same scope, so both must change together).
    payInvoiceFailed = tasks.LambdaInvoke(self, 'payInvoiceFailed',
        lambda_function=self.createLambda('payInvoiceFailedLambda', 'payInvoiceFailed.payInvoiceFailed'),
        timeout=cdk.Duration.seconds(300)
    ).next(sfn.Fail(self, 'tipErrorState'))
    # Success branch: notify, then terminate in a Succeed state.
    payInvoiceSucceeded = tasks.LambdaInvoke(self, 'payInvoiceSucceeded',
        lambda_function=self.createLambda('payInvoiceSucceededLambda', 'payInvoiceSucceeded.payInvoiceSucceeded'),
        timeout=cdk.Duration.seconds(300)
    ).next(sfn.Succeed(self, 'tipSuccessState'))
    # Activity task polled by an external worker; heartbeat and overall
    # timeout are both 24h (86400s).
    self.payInvoice = tasks.StepFunctionsInvokeActivity(self, 'payInvoice',
        activity=sfn.Activity(self, 'payInvoiceActivity'),
        heartbeat=cdk.Duration.seconds(86400),
        timeout=cdk.Duration.seconds(86400),
    )
    # NOTE(review): max_attempts=0 disables retrying 'States.Timeout'
    # entirely (backoff_rate/interval are then irrelevant) — confirm intent.
    self.payInvoice.add_retry(
        backoff_rate=2,
        errors=['States.Timeout'],
        interval=cdk.Duration.seconds(600),
        max_attempts=0
    )
    # Any error routes to the failure-notification branch; the error detail
    # is appended at $.errorInfo.
    self.payInvoice.add_catch(
        handler=payInvoiceFailed,
        errors=['States.ALL'],
        result_path='$.errorInfo'
    )
    self.payInvoice.next(payInvoiceSucceeded)
    return sfn.StateMachine(self, 'withdrawWorkflow',
        definition=self.payInvoice,
        role=self.statesRole
    )
def create_enumerate_statemachine(self):
    """Build the TF-IDF pipeline state machine and store it on self.

    Flow: enumerate all notes -> map over note ids computing term frequency
    (TF) -> compute inter-document frequency (IDF) over the results -> map
    over notes rendering TF*IDF word-cloud PNGs. All steps invoke the same
    dispatcher Lambda (self.step_lambda), switching on the "action" field.
    """
    enumerate_job = tasks.LambdaInvoke(
        self,
        "Enumerate Notes Job",
        lambda_function=self.step_lambda,
        payload=sfn.TaskInput.from_object({"action": "enumerate_notes"}),
    )
    # Per-note TF computation; fields are lifted from each map item.
    get_tf_job = tasks.LambdaInvoke(self, "Get Text Frequency Job",
        lambda_function=self.step_lambda,
        payload=sfn.TaskInput.from_object({
            "action": "update_tf",
            "id.$": "$.id",
            "contentUpdatedAt.$": "$.contentUpdatedAt",
            "isArchived.$": "$.isArchived",
        }),
        output_path="$.Payload")
    # Fan out over the id list produced by the enumerate step.
    map_job = sfn.Map(self, "Notes Map",
                      items_path="$.Payload.id_list",
                      max_concurrency=8)
    get_idf_job = tasks.LambdaInvoke(
        self,
        "Get Inter Document Frequency Job",
        lambda_function=self.step_lambda,
        payload=sfn.TaskInput.from_object({
            "action": "update_idf",
            "notes.$": "$"
        }),
    )
    # Second fan-out renders one PNG per note; higher concurrency since the
    # work is independent.
    map_tfidf_job = sfn.Map(self, "TF*IDF Notes Map",
                            items_path="$.Payload.notes",
                            max_concurrency=100)
    get_tfidf_job = tasks.LambdaInvoke(
        self,
        "Get TF*IDF WordCloud Image Job",
        lambda_function=self.step_lambda,
        payload=sfn.TaskInput.from_object({
            "action": "update_tfidf_png",
            "id.$": "$.id",
            "contentUpdatedAt.$": "$.contentUpdatedAt",
            "isArchived.$": "$.isArchived",
        }),
    )
    # .iterator() returns the Map state itself, so the chain below reads
    # enumerate -> map(TF) -> IDF -> map(TF*IDF).
    definition = (enumerate_job.next(
        map_job.iterator(get_tf_job)).next(get_idf_job).next(
            map_tfidf_job.iterator(get_tfidf_job)))
    self.enumerate_statemachine = sfn.StateMachine(
        self,
        "EnumerateStateMachine",
        definition=definition,
        timeout=core.Duration.hours(5),
    )
def __init__(self, scope: core.Construct, id: str, *,
             polling_delay: int = 5,
             statemachine_timeout: int = 300, **kwargs):
    """Account-configuration state machine with a poll-until-ready loop.

    Flow: request configuration -> mark "configuring" -> wait
    ``polling_delay`` seconds -> poll -> Choice: PENDING loops back to the
    wait, READY marks completion and succeeds, anything else fails.

    Args:
        scope: Parent construct.
        id: Construct id.
        polling_delay: Seconds between polls of the configuration status.
        statemachine_timeout: Overall state-machine timeout in seconds.
    """
    super().__init__(scope, id, **kwargs)
    # Two Lambdas: one records workflow state, one drives the account config.
    state_fn = StateHandlerLambda(self, "config-state-handler").function
    config_fn = AccountConfigLambda(self, "account-config-handler").function
    config_state = tasks.LambdaInvoke(self, "Set Configuring State",
                                      lambda_function=state_fn,
                                      output_path="$.Payload")
    completed_state = tasks.LambdaInvoke(self, "Set Completed State",
                                         lambda_function=state_fn,
                                         output_path="$.Payload")
    config_task = tasks.LambdaInvoke(self, "Request Account Configuration",
                                     lambda_function=config_fn,
                                     output_path="$.Payload")
    polling_task = tasks.LambdaInvoke(self, "Poll Account Configuration",
                                      lambda_function=config_fn,
                                      output_path="$.Payload")
    delay = sfn.Wait(self, "Delay Polling",
                     time=sfn.WaitTime.duration(
                         core.Duration.seconds(polling_delay)))
    is_ready = sfn.Choice(self, "Account Ready?")
    # Choice branches key off $.state emitted by the polling Lambda.
    acct_ready = sfn.Condition.string_equals('$.state', "READY")
    acct_pending = sfn.Condition.string_equals('$.state', "PENDING")
    success = sfn.Succeed(self, "Config Succeeded")
    # Any state other than READY/PENDING is treated as an error.
    failed = sfn.Fail(self, "Config Failed", cause="Bad value in Polling loop")
    # this is the loop which polls for state change, either looping back to
    # delay or setting completion state and finishing
    is_ready.when(acct_pending, delay).when(
        acct_ready, completed_state.next(success)).otherwise(failed)
    # this is the main chain starting with creation request a delay and then
    # polling loop
    config_chain = config_task.next(config_state).next(delay).next(
        polling_task).next(is_ready)
    self.state_machine = sfn.StateMachine(
        self, "Account-Config-StateMachine",
        definition=config_chain,
        timeout=core.Duration.seconds(statemachine_timeout))
def __init__(self, scope: Construct, id: str, functions: LambdaLib, **kwargs) -> None: super().__init__(scope, id) # Step Function submit_job = tasks.LambdaInvoke(self, "Submit Job", lambda_function=functions.send_email_approval, payload=sfn.TaskInput.from_object({'ExecutionContext.$': '$$'}), result_path=sfn.JsonPath.DISCARD ) wait_x = sfn.Wait(self, "Wait", time= sfn.WaitTime.duration(Duration.minutes(2)) ) get_status = tasks.LambdaInvoke(self, "Get Job Status", lambda_function=functions.check_status_dynamo, payload=sfn.TaskInput.from_object({'ExecutionContext.$': '$$'}), result_path="$.status" ) restrict_es = tasks.LambdaInvoke(self, "Restric ES Policy", lambda_function=functions.restric_es_policy, payload=sfn.TaskInput.from_object({'ExecutionContext.$': '$$'}), ) restrict_rds = tasks.LambdaInvoke(self, "Restric RDS", lambda_function=functions.restric_rds_policy, payload=sfn.TaskInput.from_object({'ExecutionContext.$': '$$'}), ) restrict_es_condition = sfn.Condition.string_equals("$.detail.additionalEventData.configRuleName", constants.CONFIG_RULE_ES_PUBLIC) restrict_rds_condition = sfn.Condition.string_equals("$.detail.additionalEventData.configRuleName", constants.CONFIG_RULE_RDS_PUBLIC) definition = (submit_job.next(wait_x) .next(get_status) .next(sfn.Choice(self, "Job Complete?") .when(sfn.Condition.string_equals("$.status.Payload.status", "Rejected!"), wait_x) # .when(sfn.Condition.string_equals("$.status.Payload.status", "NON_COMPLIANT"), final_task) # .when(sfn.Condition.string_equals("$.status.Payload.status", "Accepted!"), final_task)) .otherwise(sfn.Choice(self, "Remediation Choice") .when(restrict_es_condition, restrict_es) .when(restrict_rds_condition, restrict_rds))) ) self.state_machine = sfn.StateMachine(self, "StateMachine", definition=definition, timeout=Duration.hours(2) )
def lambda_task(
        self,
        name: str,
        fx: aws_lambda.IFunction = None,
        fx_attr: aws_lambda.FunctionProps = None,
        invoke: dict = None) -> aws_stepfunctions_tasks.LambdaInvoke:
    """Create a Step Functions task that invokes a Lambda function.

    Args:
        name (str): Construct id for the task; also used as the key under
            which the function is cached in ``self.lambdas``.
        fx (aws_lambda.IFunction, optional): Existing function to invoke.
            When None, a new one is built via ``self.lambda_function`` from
            ``fx_attr``. Defaults to None.
        fx_attr (aws_lambda.FunctionProps, optional): Properties for a newly
            built function; only used when ``fx`` is None. Defaults to None.
        invoke (dict, optional): Extra keyword arguments forwarded verbatim
            to ``LambdaInvoke`` (e.g. ``output_path``). Defaults to None,
            meaning no extra arguments.

    Returns:
        aws_stepfunctions_tasks.LambdaInvoke: the configured invoke task.
    """
    # The previous default was a mutable ``{}`` shared between calls — a
    # classic Python pitfall. Use None as the sentinel and create a fresh
    # dict per call instead.
    if invoke is None:
        invoke = {}
    if not fx:
        fx = self.lambda_function(name, fx_attr=fx_attr)
    # Keep a reference to that lambda Function
    self.lambdas[name] = fx
    return aws_stepfunctions_tasks.LambdaInvoke(
        self,
        name,
        lambda_function=fx,
        **invoke
    )
def build(scope: core.Construct, id: str, *,
          input_path: str = '$',
          output_path: Optional[str] = None,
          result_path: Optional[str] = None) -> sfn.Task:
    """Return a Task invoking the UpdateClusterTags Lambda.

    The Lambda receives the full execution input plus the task input
    selected by ``input_path``; its response becomes the task output
    (payload_response_only).
    """
    # Nest under a fresh Construct so the Lambda/Task ids cannot collide
    # with sibling constructs in ``scope``.
    nested = core.Construct(scope, id)
    tags_lambda = emr_lambdas.UpdateClusterTagsBuilder.get_or_build(nested)
    lambda_payload = sfn.TaskInput.from_object({
        'ExecutionInput': sfn.TaskInput.from_context_at('$$.Execution.Input').value,
        'Input': sfn.TaskInput.from_data_at(input_path).value,
    })
    return sfn_tasks.LambdaInvoke(
        nested,
        'Update Cluster Tags',
        lambda_function=tags_lambda,
        payload=lambda_payload,
        payload_response_only=True,
        output_path=output_path,
        result_path=result_path,
    )
def build(scope: core.Construct, id: str, *,
          default_fail_if_cluster_running: bool,
          input_path: str = '$',
          output_path: Optional[str] = None,
          result_path: Optional[str] = None) -> sfn.Task:
    """Return a Task invoking the FailIfClusterRunning Lambda.

    The Lambda receives the execution input, the configured default flag,
    and the task input selected by ``input_path``; its response becomes the
    task output (payload_response_only).
    """
    # Nest under a fresh Construct so the Lambda/Task ids cannot collide
    # with sibling constructs in ``scope``.
    nested = core.Construct(scope, id)
    checker_lambda = emr_lambdas.FailIfClusterRunningBuilder.get_or_build(nested)
    lambda_payload = sfn.TaskInput.from_object({
        'ExecutionInput': sfn.TaskInput.from_context_at('$$.Execution.Input').value,
        'DefaultFailIfClusterRunning': default_fail_if_cluster_running,
        'Input': sfn.TaskInput.from_data_at(input_path).value,
    })
    return sfn_tasks.LambdaInvoke(
        nested,
        'Fail If Cluster Running',
        lambda_function=checker_lambda,
        payload=lambda_payload,
        payload_response_only=True,
        output_path=output_path,
        result_path=result_path,
    )
def build(scope: core.Construct, id: str, *,
          override_cluster_configs_lambda: Optional[aws_lambda.Function] = None,
          allowed_cluster_config_overrides: Optional[Dict[str, str]] = None,
          input_path: str = '$',
          output_path: Optional[str] = None,
          result_path: Optional[str] = None) -> sfn.Task:
    """Return a Task invoking the OverrideClusterConfigs Lambda.

    A caller-supplied Lambda takes precedence; otherwise the shared builder
    Lambda is used. The Lambda receives the execution input, the task input
    selected by ``input_path``, and the whitelist of overridable config
    keys; its response becomes the task output (payload_response_only).
    """
    # Nest under a fresh Construct so the Lambda/Task ids cannot collide
    # with sibling constructs in ``scope``.
    nested = core.Construct(scope, id)
    if override_cluster_configs_lambda is None:
        configs_lambda = emr_lambdas.OverrideClusterConfigsBuilder.get_or_build(nested)
    else:
        configs_lambda = override_cluster_configs_lambda
    lambda_payload = sfn.TaskInput.from_object({
        'ExecutionInput': sfn.TaskInput.from_context_at('$$.Execution.Input').value,
        'Input': sfn.TaskInput.from_data_at(input_path).value,
        'AllowedClusterConfigOverrides': allowed_cluster_config_overrides,
    })
    return sfn_tasks.LambdaInvoke(
        nested,
        'Override Cluster Configs',
        lambda_function=configs_lambda,
        payload=lambda_payload,
        payload_response_only=True,
        output_path=output_path,
        result_path=result_path,
    )
def __init__(self, scope: core.Construct, id: str, *,
             emr_step: emr_code.EMRStep,
             cluster_id: str,
             result_path: Optional[str] = None,
             output_path: Optional[str] = None,
             fail_chain: Optional[sfn.IChainable] = None,
             wait_for_step_completion: bool = True):
    """Chain fragment: override an EMR step's args via Lambda, then add the
    step to the cluster.

    The override Lambda's response is stored at ``$.{id}ResultArgs`` and
    spliced into the resolved step definition as its HadoopJarStep args.
    ``self._start`` / ``self._end`` expose the fragment's endpoints for
    composition into larger chains.
    """
    super().__init__(scope, id)
    override_step_args = emr_lambdas.OverrideStepArgsBuilder.get_or_build(
        self)
    # Resolve the (possibly overridden) step arguments at execution time;
    # the result path is keyed by this construct's id to stay unique.
    override_step_args_task = sfn_tasks.LambdaInvoke(
        self,
        f'{emr_step.name} - Override Args',
        result_path=f'$.{id}ResultArgs',
        lambda_function=override_step_args,
        payload_response_only=True,
        payload=sfn.TaskInput.from_object({
            'ExecutionInput':
            sfn.TaskInput.from_context_at('$$.Execution.Input').value,
            'StepName': emr_step.name,
            'Args': emr_step.args
        }),
    )
    resolved_step = emr_step.resolve(self)
    # Replace the static args with a JSON-path reference to the Lambda's
    # output, so the actual args are injected at run time.
    resolved_step['HadoopJarStep']['Args'] = sfn.TaskInput.from_data_at(
        f'$.{id}ResultArgs').value
    # RUN_JOB blocks until the EMR step finishes; REQUEST_RESPONSE is
    # fire-and-forget.
    integration_pattern = sfn.IntegrationPattern.RUN_JOB if wait_for_step_completion \
        else sfn.IntegrationPattern.REQUEST_RESPONSE
    add_step_task = emr_tasks.EmrAddStepTask(
        self,
        emr_step.name,
        output_path=output_path,
        result_path=result_path,
        cluster_id=cluster_id,
        step=resolved_step,
        integration_pattern=integration_pattern,
    )
    if fail_chain:
        # Route failures from either task into the supplied chain, keeping
        # the error detail at $.Error.
        override_step_args_task.add_catch(fail_chain,
                                          errors=['States.ALL'],
                                          result_path='$.Error')
        add_step_task.add_catch(fail_chain,
                                errors=['States.ALL'],
                                result_path='$.Error')
    override_step_args_task.next(add_step_task)
    self._start = override_step_args_task
    self._end = add_step_task
def createTipWorkflow(self):
    """Build the tip state machine around a worker-polled 'getTipperInvoice'
    activity: on success notify the tipper and succeed, on any error fail.

    Returns:
        sfn.StateMachine: the 'tipWorkflow' state machine.
    """
    # Success branch: notify the tipper, then terminate in a Succeed state.
    # NOTE(review): terminal states here are named 'withdrawSuccessState' /
    # 'withdrawErrorState' while createWithdrawWorkflow uses 'tip*' names —
    # the two workflows' terminal-state ids look swapped; confirm intent
    # before renaming (ids share the same scope, so both must change
    # together).
    notifyTipper = tasks.LambdaInvoke(self, 'notifyTipper',
        lambda_function=self.createLambda('notifyTipperLambda', 'tipNotifier.tipNotifier'),
        timeout=cdk.Duration.seconds(300)
    ).next(sfn.Succeed(self, 'withdrawSuccessState'))
    # Activity task polled by an external worker; 60s heartbeat, 24h overall
    # timeout.
    self.getTipperInvoice = tasks.StepFunctionsInvokeActivity(self, 'getTipperInvoice',
        activity=sfn.Activity(self, 'getTipperInvoiceActivity'),
        heartbeat=cdk.Duration.seconds(60),
        timeout=cdk.Duration.seconds(86400),
    )
    # Retry timeouts up to 7 times with 1.5x backoff starting at 60s.
    self.getTipperInvoice.add_retry(
        backoff_rate=1.5,
        errors=['States.Timeout'],
        interval=cdk.Duration.seconds(60),
        max_attempts=7
    )
    # Any other error terminates the workflow in a Fail state, with the
    # error detail appended at $.errorInfo.
    self.getTipperInvoice.add_catch(
        handler=sfn.Fail(self, 'withdrawErrorState'),
        errors=['States.ALL'],
        result_path='$.errorInfo'
    )
    self.getTipperInvoice.next(notifyTipper)
    return sfn.StateMachine(self, 'tipWorkflow',
        definition=self.getTipperInvoice,
        role=self.statesRole
    )
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    """Coin-flip demo stack: invoke a Lambda, branch on its result, and on
    tails wait five seconds and flip again; on heads succeed."""
    super().__init__(scope, construct_id, **kwargs)
    # Lambda backing the coin flip.
    coin_fn = lambda_.Function(
        self,
        "FlipCoinFunction",
        runtime=lambda_.Runtime.PYTHON_3_8,
        handler="index.handler",
        code=lambda_.Code.from_asset("./sfn/lambda/flip_coin"))
    flip_task = tasks.LambdaInvoke(
        self, "FlipCoin", lambda_function=coin_fn)
    pause = stepfunctions.Wait(
        self,
        "Wait",
        time=stepfunctions.WaitTime.duration(core.Duration.seconds(5)))
    # Tails loops back to another flip via a Pass state.
    on_tails = stepfunctions.Pass(self, "TailsResult")
    on_tails.next(flip_task)
    heads = stepfunctions.Condition.string_equals("$.Payload.result", "heads")
    tails = stepfunctions.Condition.string_equals("$.Payload.result", "tails")
    branch = stepfunctions.Choice(self, "HeadsTailsChoice")
    branch.when(condition=heads, next=stepfunctions.Succeed(self, "HeadsResult"))
    branch.when(condition=tails, next=on_tails)
    # flip -> wait -> choice; choice either succeeds or loops.
    stepfunctions.StateMachine(
        self,
        "StateMachine",
        definition=flip_task.next(pause.next(branch)))
def __init__(
    self,
    scope: Construct,
    construct_id: str,
    *,
    directory: str,
    botocore_lambda_layer: aws_lambda_python.PythonLayerVersion,
    result_path: Optional[str] = JsonPath.DISCARD,
    extra_environment: Optional[Mapping[str, str]] = None,
):
    """Bundle a Lambda from ``directory`` and expose a matching LambdaInvoke task.

    Exposes ``self.lambda_function`` (the bundled function) and
    ``self.lambda_invoke`` (a payload_response_only invoke task whose result
    is discarded by default via ``result_path=JsonPath.DISCARD``).
    """
    super().__init__(scope, construct_id)

    self.lambda_function = BundledLambdaFunction(
        self,
        f"{construct_id}-bundled-lambda-function",
        directory=directory,
        extra_environment=extra_environment,
        botocore_lambda_layer=botocore_lambda_layer,
    )

    # NOTE(review): the invoke task is parented to ``scope`` (the caller),
    # not ``self`` — presumably so the task id lives beside the caller's
    # other states; confirm this is intentional.
    self.lambda_invoke = aws_stepfunctions_tasks.LambdaInvoke(
        scope,
        f"{construct_id}-lambda-invoke",
        lambda_function=self.lambda_function,
        result_path=result_path,
        payload_response_only=True,
    )
def __init__(self, scope, id, name=None, lambdas=None) -> None:
    """Express state machine fanning out to three prediction Lambdas in
    parallel (random forest, SVR, linear regressor)."""
    super().__init__(scope, id)

    # ---- IAM role assumed by the state machine ----
    machine_role = iam.Role(
        scope=self,
        id='state_machine_role',
        assumed_by=iam.ServicePrincipal(service='states.amazonaws.com'),
    )
    machine_role.add_to_policy(
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=['lambda:InvokeFunction'],
            resources=['*'],
        ))

    # ---- One invoke task per model ----
    rf_task = tasks.LambdaInvoke(
        scope=self,
        id='Random Forest',
        lambda_function=lambdas['lambda_rf'],
        payload_response_only=True)
    svr_task = tasks.LambdaInvoke(
        scope=self,
        id='Support Vector',
        lambda_function=lambdas['lambda_svr'],
        payload_response_only=True)
    lr_task = tasks.LambdaInvoke(
        scope=self,
        id='Linear Regressor',
        lambda_function=lambdas['lambda_lr'],
        payload_response_only=True)

    # All three branches run concurrently.
    fan_out = sfn.Parallel(scope=self, id='Invoke Predictions')
    fan_out.branch(rf_task)
    fan_out.branch(svr_task)
    fan_out.branch(lr_task)

    self.state_machine = sfn.StateMachine(
        scope=self,
        id='state_machine',
        state_machine_name=name,
        definition=fan_out,
        role=machine_role,
        state_machine_type=sfn.StateMachineType.EXPRESS)
def __init__(self, scope: core.Construct, id: builtins.str, action_name: str,
             resources: FsiSharedResources, function: lambda_.Function) -> None:
    """Self-looping state machine around one Lambda action.

    The Lambda is re-invoked while it reports MORE_AVAILABLE, passes to a
    terminal Pass state on COMPLETE, and fails on any other RunState.
    Logs go to a dedicated CloudWatch log group; tracing is enabled.
    """
    super().__init__(scope, id)
    self.__resources = resources
    state_machine_name = id

    # Define the state machine definition...
    # Each invocation consumes $.Payload and writes its output to $.Result.
    invoke_function = sft.LambdaInvoke(
        self, 'InvokeFunction',
        lambda_function=function,
        invocation_type=sft.LambdaInvocationType.REQUEST_RESPONSE,
        input_path='$.Payload',
        result_path='$.Result')
    choice = sf.Choice(self, 'IsComplete',
                       comment='Check if theres more to process')
    # MORE_AVAILABLE loops straight back into the invoke task.
    choice.when(
        sf.Condition.string_equals('$.Result.Payload.Result.RunState',
                                   'RunStatus.MORE_AVAILABLE'),
        invoke_function)
    choice.when(
        sf.Condition.string_equals('$.Result.Payload.Result.RunState',
                                   'RunStatus.COMPLETE'),
        sf.Pass(self, 'Finalize', comment='Workflow Complete'))
    # Any unrecognized RunState is treated as a hard failure.
    choice.otherwise(
        sf.Fail(self, 'NotImplemented',
                cause='Unknown Choice',
                error='NotImplementedException'))
    definition = invoke_function.next(choice)

    # Register the definition as StateMachine...
    zone_name = self.resources.landing_zone.zone_name
    self.state_machine = sf.StateMachine(
        self, 'StateMachine',
        state_machine_name=state_machine_name,
        state_machine_type=sf.StateMachineType.STANDARD,
        timeout=core.Duration.hours(2),
        logs=sf.LogOptions(destination=logs.LogGroup(
            self, 'LogGroup',
            removal_policy=core.RemovalPolicy.DESTROY,
            retention=RetentionDays.TWO_WEEKS,
            # Log-group path is derived from zone/component/action names.
            log_group_name='/homenet/fsi-{}/states/{}/{}'.format(
                zone_name, self.component_name, action_name).lower())),
        tracing_enabled=True,
        definition=definition)
def _invoke_lambda(self, name: str, fx: aws_lambda.IFunction=None, code: aws_lambda.Code=None, handler: str=None, runtime=aws_lambda.Runtime.PYTHON_3_7): if not fx: fx = aws_lambda.Function( self, "fxi_{}".format(name), code=code, handler=handler, runtime=runtime ) return sfn_tasks.LambdaInvoke( self, "{}".format(name), lambda_function=fx # output_path='$.Payload', )
def _invoke_lambda(self,
                   name: str,
                   fx: aws_lambda.IFunction = None,
                   code: aws_lambda.AssetCode = None,
                   handler: str = None,
                   runtime=aws_lambda.Runtime.PYTHON_3_7,
                   **invoke_attr) -> sfn_tasks.LambdaInvoke:
    """Create a LambdaInvoke task, building the Lambda function if needed.

    Args:
        name: Construct id for the task; also used (prefixed ``fxi_``) as the
            id of a newly created function.
        fx: Existing function to invoke. When None, a new function is built
            from ``code``/``handler``/``runtime``.
        code: Function asset code; only used when ``fx`` is None.
        handler: Handler entry point; only used when ``fx`` is None.
        runtime: Runtime for a newly created function.
        **invoke_attr: Extra keyword arguments forwarded to ``LambdaInvoke``.

    Returns:
        The configured LambdaInvoke task.
    """
    if not fx:
        fx = aws_lambda.Function(self,
                                 f"fxi_{name}",  # was "fxi_{}".format(name)
                                 code=code,
                                 handler=handler,
                                 runtime=runtime)
    # ``"{}".format(name)`` was a no-op wrapper around ``name``.
    return sfn_tasks.LambdaInvoke(self,
                                  name,
                                  lambda_function=fx,
                                  **invoke_attr)
def build(scope: core.Construct, id: str, *,
          cluster_name: str,
          cluster_tags: List[core.Tag],
          profile_namespace: str,
          profile_name: str,
          configuration_namespace: str,
          configuration_name: str,
          output_path: Optional[str] = None,
          result_path: Optional[str] = None) -> sfn.Task:
    """Return a Task invoking the LoadClusterConfiguration Lambda.

    The Lambda is built for the given profile/configuration pair and invoked
    with the cluster name, its tags, and the same profile/configuration
    coordinates; its response becomes the task output
    (payload_response_only).
    """
    # Nest under a fresh Construct so the Lambda/Task ids cannot collide
    # with sibling constructs in ``scope``.
    nested = core.Construct(scope, id)
    loader_lambda = emr_lambdas.LoadClusterConfigurationBuilder.build(
        nested,
        profile_namespace=profile_namespace,
        profile_name=profile_name,
        configuration_namespace=configuration_namespace,
        configuration_name=configuration_name)
    # Serialize the CDK Tag objects into the {'Key','Value'} shape expected
    # by the Lambda.
    serialized_tags = [{'Key': tag.key, 'Value': tag.value} for tag in cluster_tags]
    lambda_payload = sfn.TaskInput.from_object({
        'ClusterName': cluster_name,
        'ClusterTags': serialized_tags,
        'ProfileNamespace': profile_namespace,
        'ProfileName': profile_name,
        'ConfigurationNamespace': configuration_namespace,
        'ConfigurationName': configuration_name,
    })
    return sfn_tasks.LambdaInvoke(
        nested,
        'Load Cluster Configuration',
        lambda_function=loader_lambda,
        payload=lambda_payload,
        payload_response_only=True,
        output_path=output_path,
        result_path=result_path,
    )
def __init__(self, scope: core.Construct, id: str, name: str,
             state_machine: sfn.StateMachine,
             input: Optional[Mapping[str, any]] = None,
             fail_chain: Optional[sfn.IChainable] = None):
    """Chain fragment: run a nested state machine synchronously, then parse
    its JSON-string Output into an object.

    ``self._start`` / ``self._end`` expose the fragment's endpoints for
    composition into larger chains.
    """
    super().__init__(scope, id)

    # RUN_JOB waits for the child execution to finish before moving on.
    state_machine_task = emr_tasks.StartExecutionTask(
        self,
        name,
        state_machine=state_machine,
        input=input,
        integration_pattern=sfn.IntegrationPattern.RUN_JOB,
    )

    parse_json_string = emr_lambdas.ParseJsonStringBuilder.get_or_build(
        self)

    # The child execution's Output arrives as a JSON string; parse it and
    # replace the whole state ($) with the parsed object.
    parse_json_string_task = sfn_tasks.LambdaInvoke(
        self,
        f'{name} - Parse JSON Output',
        result_path='$',
        lambda_function=parse_json_string,
        payload_response_only=True,
        payload=sfn.TaskInput.from_object(
            {'JsonString': sfn.TaskInput.from_data_at('$.Output').value}),
    )

    if fail_chain:
        # Route failures from either task into the supplied chain, keeping
        # the error detail at $.Error.
        state_machine_task.add_catch(fail_chain,
                                     errors=['States.ALL'],
                                     result_path='$.Error')
        parse_json_string_task.add_catch(fail_chain,
                                         errors=['States.ALL'],
                                         result_path='$.Error')

    state_machine_task.next(parse_json_string_task)

    self._start = state_machine_task
    self._end = parse_json_string_task
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Deploy a hello-world Lambda wrapped in a single-step state machine,
    publishing both ARNs to SSM for integration tests."""
    super().__init__(scope, id, **kwargs)

    fn = aws_lambda.Function(
        self,
        "HelloWorld",
        code=aws_lambda.Code.from_asset("./lambdas/hello_world"),
        handler="function.handler.handler",
        timeout=core.Duration.seconds(5),
        runtime=aws_lambda.Runtime.PYTHON_3_8,
    )

    invoke_task = aws_stepfunctions_tasks.LambdaInvoke(
        self,
        "InvokeHelloWorld",
        lambda_function=fn,
        result_path="$.hello_message",
    )

    machine = aws_stepfunctions.StateMachine(
        self,
        "Hello World Step Function",
        definition=aws_stepfunctions.Chain.start(invoke_task),
    )

    # Expose both ARNs under a test-scoped SSM namespace.
    aws_ssm.StringParameter(
        self,
        "HelloWorldLambdaArn",
        string_value=fn.function_arn,
        parameter_name=f"/integration_tests/{id}/hello_world_lambda_arn",
    )
    aws_ssm.StringParameter(
        self,
        "HelloWorldStepFunctionArn",
        string_value=machine.state_machine_arn,
        parameter_name=f"/integration_tests/{id}/hello_world_step_function_arn",
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Lab 3 application-review workflow.

    Runs identity and address verification in parallel, optionally routes
    to a human-review Lambda, and ends in terminal Approve/Reject states.
    """
    super().__init__(scope, id, **kwargs)

    # Shared execution role for all three Lambdas, with CloudWatch Logs
    # permissions only.
    lambda_role = _iam.Role(
        self,
        id='lab3-om-role',
        assumed_by=_iam.ServicePrincipal('lambda.amazonaws.com'))
    cloudwatch_policy_statement = _iam.PolicyStatement(
        effect=_iam.Effect.ALLOW)
    cloudwatch_policy_statement.add_actions("logs:CreateLogGroup")
    cloudwatch_policy_statement.add_actions("logs:CreateLogStream")
    cloudwatch_policy_statement.add_actions("logs:PutLogEvents")
    cloudwatch_policy_statement.add_actions("logs:DescribeLogStreams")
    cloudwatch_policy_statement.add_resources("*")
    lambda_role.add_to_policy(cloudwatch_policy_statement)

    fn_lambda_approve_reject = aws_lambda.Function(
        self,
        "lab3-om-approve-reject",
        code=aws_lambda.AssetCode(
            "../lambda-functions/approve-reject-application/"),
        handler="app.handler",
        tracing=aws_lambda.Tracing.ACTIVE,
        timeout=core.Duration.seconds(30),
        role=lambda_role,
        runtime=aws_lambda.Runtime.PYTHON_3_8)

    fn_lambda_verify_identity = aws_lambda.Function(
        self,
        "lab3-om-verify-identity",
        code=aws_lambda.AssetCode("../lambda-functions/verify-identity/"),
        handler="app.handler",
        tracing=aws_lambda.Tracing.ACTIVE,
        timeout=core.Duration.seconds(30),
        role=lambda_role,
        runtime=aws_lambda.Runtime.PYTHON_3_8)

    fn_lambda_check_address = aws_lambda.Function(
        self,
        "lab3-om-check-address",
        code=aws_lambda.AssetCode("../lambda-functions/check-address/"),
        handler="app.handler",
        tracing=aws_lambda.Tracing.ACTIVE,
        timeout=core.Duration.seconds(30),
        role=lambda_role,
        runtime=aws_lambda.Runtime.PYTHON_3_8)

    ''' [INFO] This is a sample how to define the task and integrate with Lambda Functions. You need to create another 2 tasks for respective Lambda functions '''
    task_verify_identity = _tasks.LambdaInvoke(
        self,
        "Verify Identity Document",
        lambda_function=fn_lambda_verify_identity,
        output_path="$.Payload")

    task_check_address = _tasks.LambdaInvoke(
        self,
        "Check Address",
        lambda_function=fn_lambda_check_address,
        output_path="$.Payload")

    task_wait_review = _tasks.LambdaInvoke(
        self,
        "Wait for Review",
        lambda_function=fn_lambda_approve_reject,
        output_path="$.Payload")

    state_approve = _sfn.Succeed(self, "Approve Application")
    state_reject = _sfn.Succeed(self, "Reject Application")

    # Let's define the State Machine, step by step
    # First, paralell tasks for verification
    s_verification = _sfn.Parallel(self, "Verification")
    s_verification.branch(task_verify_identity)
    s_verification.branch(task_check_address)

    # Next, we add a choice state
    # NOTE(review): auto-approve fires when branch 0 says no review but
    # branch 1 says review IS required — and review fires on the mirrored
    # combination. This looks inverted (one would expect both-False to
    # approve); confirm against the lab's intended logic.
    c_human_review = _sfn.Choice(self, "Human review required?")
    c_human_review.when(
        _sfn.Condition.and_(
            _sfn.Condition.boolean_equals("$[0].humanReviewRequired", False),
            _sfn.Condition.boolean_equals("$[1].humanReviewRequired", True)),
        state_approve)
    c_human_review.when(
        _sfn.Condition.or_(
            _sfn.Condition.boolean_equals("$[0].humanReviewRequired", True),
            _sfn.Condition.boolean_equals("$[1].humanReviewRequired", False)),
        task_wait_review)

    # Another choice state to check if the application passed the review
    c_review_approved = _sfn.Choice(self, "Review approved?")
    c_review_approved.when(
        _sfn.Condition.boolean_equals("$.reviewApproved", True),
        state_approve)
    c_review_approved.when(
        _sfn.Condition.boolean_equals("$.reviewApproved", False),
        state_reject)
    task_wait_review.next(c_review_approved)

    definition = s_verification.next(c_human_review)

    _sfn.StateMachine(self,
                      "lab3-statemachine",
                      definition=definition,
                      timeout=core.Duration.minutes(5))
def __init__(self, scope: Construct, construct_id: str, env, **kwargs) -> None:
    """GuardDuty auto-remediation stack.

    Two EventBridge-triggered state machines: one records a finding's remote
    IP in DynamoDB and (for new IPs) pushes a drop rule into a Network
    Firewall rule group, alerting Slack; the other forwards all remaining
    Security Hub findings to Slack. Failures in either flow end in Fail
    states; undeliverable events land in a shared SQS DLQ.
    """
    super().__init__(scope, construct_id, env=env, **kwargs)

    # Seed rule group: single stateless rule dropping traffic to a
    # placeholder destination; the update Lambda rewrites it at runtime.
    rg_property = network_fw.CfnRuleGroup.RuleGroupProperty(
        rule_variables=None,
        rules_source=network_fw.CfnRuleGroup.RulesSourceProperty(
            stateless_rules_and_custom_actions=network_fw.CfnRuleGroup.
            StatelessRulesAndCustomActionsProperty(stateless_rules=[
                network_fw.CfnRuleGroup.StatelessRuleProperty(
                    priority=10,
                    rule_definition=network_fw.CfnRuleGroup.
                    RuleDefinitionProperty(
                        actions=["aws:drop"],
                        match_attributes=network_fw.CfnRuleGroup.
                        MatchAttributesProperty(destinations=[
                            network_fw.CfnRuleGroup.AddressProperty(
                                address_definition="127.0.0.1/32")
                        ])))
            ])))

    nf_rule_group = network_fw.CfnRuleGroup(
        scope=self,
        id='GuardDutyNetworkFireWallRuleGroup',
        capacity=100,
        rule_group_name='guardduty-network-firewall',
        type='STATELESS',
        description='Guard Duty network firewall rule group',
        tags=[CfnTag(key='Name', value='cfn.rule-group.stack')],
        rule_group=rg_property)

    """ https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-rule-dlq.html#dlq-considerations """
    # Shared DLQ for events EventBridge cannot deliver to either machine.
    dlq_statemachine = sqs.Queue(self,
                                 'DLQStateMachine',
                                 queue_name='dlq_state_machine')

    # Tracks which host IPs have already been blocked (keyed by HostIp).
    guardduty_firewall_ddb = ddb.Table(
        scope=self,
        id=f'GuarddutyFirewallDDB',
        table_name='GuardDutyFirewallDDBTable',
        removal_policy=RemovalPolicy.DESTROY,
        partition_key=ddb.Attribute(name='HostIp',
                                    type=ddb.AttributeType.STRING),
        billing_mode=ddb.BillingMode.PAY_PER_REQUEST)

    """ IAM role for ddb permission """
    # Single role shared by the recording and firewall-update Lambdas:
    # logs, DynamoDB read/write, and rule-group describe/update.
    nf_iam_role = iam.Role(
        self,
        'DDBRole',
        role_name=f'ddb-nf-role-{env.region}',
        assumed_by=iam.ServicePrincipal(service='lambda.amazonaws.com'))

    nf_iam_role.add_to_policy(
        iam.PolicyStatement(effect=iam.Effect.ALLOW,
                            resources=["arn:aws:logs:*:*:*"],
                            actions=[
                                "logs:CreateLogGroup",
                                "logs:CreateLogStream",
                                "logs:PutLogEvents"
                            ]))

    nf_iam_role.add_to_policy(
        iam.PolicyStatement(effect=iam.Effect.ALLOW,
                            resources=[
                                guardduty_firewall_ddb.table_arn,
                                f"{guardduty_firewall_ddb.table_arn}/*"
                            ],
                            actions=[
                                "dynamodb:PutItem", "dynamodb:GetItem",
                                "dynamodb:Scan"
                            ]))

    nf_iam_role.add_to_policy(
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            resources=[nf_rule_group.ref, f"{nf_rule_group.ref}/*"],
            actions=[
                "network-firewall:DescribeRuleGroup",
                "network-firewall:UpdateRuleGroup"
            ]))

    record_ip_in_db = _lambda.Function(
        self,
        'RecordIpInDB',
        function_name='record-ip-in-ddb',
        runtime=_lambda.Runtime.PYTHON_3_8,
        code=_lambda.Code.from_asset('lambda_fns'),
        handler='addIPToDDB.handler',
        environment=dict(ACLMETATABLE=guardduty_firewall_ddb.table_name),
        role=nf_iam_role)

    """ https://docs.amazonaws.cn/en_us/eventbridge/latest/userguide/eb-event-patterns-content-based-filtering.html """
    # Extract the relevant finding fields; the Lambda's response replaces
    # the whole state ($), including a NewIP flag used by the Choice below.
    record_ip_task = step_fn_task.LambdaInvoke(
        self,
        'RecordIpDDBTask',
        lambda_function=record_ip_in_db,
        payload=step_fn.TaskInput.from_object({
            "comment": "Relevant fields from the GuardDuty / Security Hub finding",
            "HostIp.$": "$.detail.findings[0].ProductFields.aws/guardduty/service/action/networkConnectionAction/remoteIpDetails/ipAddressV4",
            "Timestamp.$": "$.detail.findings[0].ProductFields.aws/guardduty/service/eventLastSeen",
            "FindingId.$": "$.id",
            "AccountId.$": "$.account",
            "Region.$": "$.region"
        }),
        result_path='$',
        payload_response_only=True)

    # NOTE(review): function names spell "gurdduty"/"networkfirewal" (sic);
    # renaming would replace the deployed functions.
    firewall_update_rule = _lambda.Function(
        scope=self,
        id='GuardDutyUpdateNetworkFirewallRule',
        function_name='gurdduty-update-networkfirewal-rule-group',
        runtime=_lambda.Runtime.PYTHON_3_8,
        code=_lambda.Code.from_asset('lambda_fns'),
        handler='updateNetworkFireWall.handler',
        environment=dict(
            FIREWALLRULEGROUP=nf_rule_group.ref,
            RULEGROUPPRI='30000',
            CUSTOMACTIONNAME='GuardDutytoFirewall',
            CUSTOMACTIONVALUE='gurdduty-update-networkfirewal-rule-group'),
        role=nf_iam_role)

    firewall_update_rule_task = step_fn_task.LambdaInvoke(
        self,
        'FirewallUpdateRuleTask',
        lambda_function=firewall_update_rule,
        input_path='$',
        result_path='$',
        payload_response_only=True)

    firewall_no_update_job = step_fn.Pass(self, 'No Firewall change')

    notify_failure_job = step_fn.Fail(self,
                                      'NotifyFailureJob',
                                      cause='Any Failure',
                                      error='Unknown')

    send_to_slack = _lambda.Function(
        scope=self,
        id='SendAlertToSlack',
        function_name='gurdduty-networkfirewal-to-slack',
        runtime=_lambda.Runtime.PYTHON_3_8,
        handler="sendSMSToSlack.handler",
        code=_lambda.Code.from_asset('lambda_fns'))

    send_slack_task = step_fn_task.LambdaInvoke(scope=self,
                                                id='LambdaToSlackDemo',
                                                lambda_function=send_to_slack,
                                                input_path='$',
                                                result_path='$')

    is_new_ip = step_fn.Choice(self, "New IP?")
    is_block_succeed = step_fn.Choice(self, "Block sucessfully?")

    # record IP -> (new? -> update firewall -> (blocked? -> slack / fail)
    #            : no-op). Each Lambda task retries TaskFailed twice and
    # catches everything else into the Fail state.
    definition = step_fn.Chain \
        .start(record_ip_task
               .add_retry(errors=["States.TaskFailed"],
                          interval=Duration.seconds(2),
                          max_attempts=2)
               .add_catch(errors=["States.ALL"], handler=notify_failure_job)) \
        .next(is_new_ip
              .when(step_fn.Condition.boolean_equals('$.NewIP', True),
                    firewall_update_rule_task
                    .add_retry(errors=["States.TaskFailed"],
                               interval=Duration.seconds(2),
                               max_attempts=2
                               )
                    .add_catch(errors=["States.ALL"], handler=notify_failure_job)
                    .next(
                        is_block_succeed
                        .when(step_fn.Condition.boolean_equals('$.Result', False),
                              notify_failure_job)
                        .otherwise(send_slack_task)
                    )
                    )
              .otherwise(firewall_no_update_job)
              )

    guardduty_state_machine = step_fn.StateMachine(
        self,
        'GuarddutyStateMachine',
        definition=definition,
        timeout=Duration.minutes(5),
        state_machine_name='guardduty-state-machine')

    # Trigger only on findings that actually carry a remote IPv4 address.
    event.Rule(
        scope=self,
        id='EventBridgeCatchIPv4',
        description="Security Hub - GuardDuty findings with remote IP",
        rule_name='guardduty-catch-ipv4',
        event_pattern=event.EventPattern(
            account=['123456789012'],
            detail_type=["GuardDuty Finding"],
            source=['aws.securityhub'],
            detail={
                "findings": {
                    "ProductFields": {
                        "aws/guardduty/service/action/networkConnectionAction/remoteIpDetails/ipAddressV4": [{
                            "exists": True
                        }]
                    }
                }
            }),
        targets=[
            event_target.SfnStateMachine(
                machine=guardduty_state_machine,
                dead_letter_queue=dlq_statemachine)
        ])

    """ Send other findings to slack """
    send_finding_to_slack = _lambda.Function(
        self,
        'SendFindingToSlack',
        function_name='send-finding-to-slack',
        runtime=_lambda.Runtime.PYTHON_3_8,
        handler="sendFindingToSlack.handler",
        code=_lambda.Code.from_asset('lambda_fns'))

    send_findings_task = step_fn_task.LambdaInvoke(
        self,
        'SendFindingToSlackTask',
        lambda_function=send_finding_to_slack,
        payload=step_fn.TaskInput.from_object({
            "comment": "Others fields from the GuardDuty / Security Hub finding",
            "severity.$": "$.detail.findings[0].Severity.Label",
            "Account_ID.$": "$.account",
            "Finding_ID.$": "$.id",
            "Finding_Type.$": "$.detail.findings[0].Types",
            "Region.$": "$.region",
            "Finding_description.$": "$.detail.findings[0].Description"
        }),
        result_path='$')

    slack_failure_job = step_fn.Fail(self,
                                     'SlackNotifyFailureJob',
                                     cause='Any Failure',
                                     error='Unknown')

    # Single-step flow with the same retry/catch policy as above.
    finding_definition = step_fn.Chain \
        .start(send_findings_task
               .add_retry(errors=["States.TaskFailed"],
                          interval=Duration.seconds(2),
                          max_attempts=2)
               .add_catch(errors=["States.ALL"], handler=slack_failure_job))

    sechub_findings_state_machine = step_fn.StateMachine(
        self,
        'SecHubFindingsStateMachine',
        definition=finding_definition,
        timeout=Duration.minutes(5),
        state_machine_name='sechub-finding-state-machine')

    event.Rule(scope=self,
               id='EventBridgeFindings',
               description="Security Hub - GuardDuty findings others",
               rule_name='others-findings',
               event_pattern=event.EventPattern(
                   account=['123456789012'],
                   source=['aws.securityhub'],
                   detail_type=['Security Hub Findings - Imported'],
                   detail={"severity": [5, 8]}),
               targets=[
                   event_target.SfnStateMachine(
                       machine=sechub_findings_state_machine,
                       dead_letter_queue=dlq_statemachine)
               ])
def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
    """Provision a connectivity watchdog for LoRaWAN gateways.

    Wires together an IoT Events detector model (fed by a Lambda that polls
    AWS IoT Wireless gateway statistics), an SNS e-mail topic for alarms,
    and a Step Functions state machine that drives the polling loop.
    The state machine is started/stopped manually via the CLI commands
    emitted as stack outputs.
    """
    super().__init__(scope, construct_id, **kwargs)

    ####################################################################################
    # IoT Events

    # IoT Events: Execution role — lets the detector model publish to IoT
    # Core topics and to SNS.
    iot_events_execution_role = iam.Role(
        self, "IoTEventsExecutionRole",
        assumed_by=iam.ServicePrincipal("iotevents.amazonaws.com"))
    iot_events_execution_role.add_to_policy(
        iam.PolicyStatement(resources=["*"], actions=["iot:Publish"]))
    iot_events_execution_role.add_to_policy(
        iam.PolicyStatement(resources=["*"], actions=["SNS:Publish"]))

    # IoT Events: Input — the message attributes ingested per gateway.
    inputDefinitionProperty = iotevents.CfnInput.InputDefinitionProperty(
        attributes=[{
            "jsonPath": "gatewayid"
        }, {
            "jsonPath": "last_uplink_received_timestamp_ms"
        }, {
            "jsonPath": "last_connection_status"
        }, {
            "jsonPath": "timestamp_iso8601"
        }])
    iot_events_input = iotevents.CfnInput(
        self, "LoRaWANGatewayConnectivityStatusInput",
        input_definition=inputDefinitionProperty,
        input_name="LoRaWANGatewayConnectivityStatusInput",
        input_description=
        "Input for connectivity status updates for LoRaWAN gateways")

    # IoT Events: Detector Model — states are supplied by the
    # lorawan_gateway_monitoring_detectormodel module (defined elsewhere in
    # this project); one detector instance is keyed per gateway id.
    detector_model_definition = iotevents.CfnDetectorModel.DetectorModelDefinitionProperty(
        initial_state_name=lorawan_gateway_monitoring_detectormodel.
        initial_state_name,
        states=lorawan_gateway_monitoring_detectormodel.get_states(self))
    iot_events_model = iotevents.CfnDetectorModel(
        self, "LoRaWANGatewayConnectivityModel",
        detector_model_definition=detector_model_definition,
        detector_model_name="LoRaWANGatewayConnectivityModel",
        detector_model_description=
        "Detector model for LoRaWAN gateway connectivity status",
        key="gatewayid",
        evaluation_method="BATCH",
        role_arn=iot_events_execution_role.role_arn)

    ####################################################################################
    # Lambda function GetWirelessGatewayStatisticsLambda

    # Execution role: may list gateways / read their statistics and push the
    # results into the IoT Events input created above.
    get_wireless_gateway_statistics_lambda_role = iam.Role(
        self, "GetWirelessGatewayStatisticsLambdaExecutionRole",
        assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"))
    get_wireless_gateway_statistics_lambda_role.add_to_policy(
        iam.PolicyStatement(resources=[
            "arn:aws:iotwireless:" + self.region + ":" + self.account +
            ":WirelessGateway/*"
        ],
                            actions=[
                                "iotwireless:ListWirelessGateways",
                                "iotwireless:GetWirelessGatewayStatistics"
                            ]))
    get_wireless_gateway_statistics_lambda_role.add_to_policy(
        iam.PolicyStatement(resources=[
            "arn:aws:iotevents:" + self.region + ":" + self.account +
            ":input/LoRaWANGatewayConnectivityStatusInput"
        ],
                            actions=["iotevents:BatchPutMessage"]))

    # Lambda function GetWirelessGatewayStatisticsLambda: configuration.
    get_wireless_gateway_statistics_lambda = lambda_.Function(
        self, "GetWirelessGatewayStatisticsLambda",
        code=lambda_.Code.asset(
            "src_get_wireless_gateway_statistics_lambda"),
        runtime=lambda_.Runtime.PYTHON_3_7,
        handler="lambda.handler",
        role=get_wireless_gateway_statistics_lambda_role,
        timeout=cdk.Duration.seconds(25))
    # NOTE(review): TEST_MODE is hard-wired to "true" — confirm this is
    # intended for production deployments.
    get_wireless_gateway_statistics_lambda.add_environment(
        "TEST_MODE", "true")
    get_wireless_gateway_statistics_lambda.add_environment(
        "IOT_EVENTS_INPUT_NAME", "LoRaWANGatewayConnectivityStatusInput")

    ####################################################################################
    # SNS topic for gateway notifications; the subscriber e-mail address is
    # supplied as a CloudFormation parameter at deploy time.
    sns_topic = sns.Topic(
        self, "LoRaWANGatewayNotificationTopic",
        display_name=
        "Topic to use for notifications about LoRaWAN gateway events like connect or disconnect",
        topic_name="LoRaWANGatewayNotificationTopic")
    email_address = cdk.CfnParameter(self, "emailforalarms")
    sns_topic.add_subscription(
        subscriptions.EmailSubscription(email_address.value_as_string))

    ####################################################################################
    # Step Function: invoke Lambda -> check status -> sleep 4 minutes ->
    # invoke again; a non-200 status terminates in the Fail state.

    # State 'Fail'
    failure_state = sfn.Fail(self, "Fail")

    # State 'Wait'
    wait_state = sfn.Wait(self, "Sleep",
                          time=sfn.WaitTime.duration(
                              cdk.Duration.minutes(4)))

    # State 'Ingest gateway connectivity status into IoT Events input'
    lambda_invoke_state = tasks.LambdaInvoke(
        self, "Ingest gateway connectivity status into IoT Events input",
        result_path="$.wireless_gateway_stats",
        lambda_function=get_wireless_gateway_statistics_lambda
    )

    # State 'Did IoT events ingestion run successfull?' (sic — renaming the
    # state id would replace the state in deployed state machines).
    choice_lambda_state = sfn.Choice(
        self, "Did IoT events ingestion run successfull?")
    choice_lambda_state.when(
        sfn.Condition.number_equals(
            "$.wireless_gateway_stats.Payload.status", 200), wait_state)
    choice_lambda_state.otherwise(failure_state)

    # Define transitions (Wait loops back into the Lambda invocation).
    wait_state.next(lambda_invoke_state)
    lambda_invoke_state.next(choice_lambda_state)

    # Create the state machine; executions are started manually (see the
    # StateMachineStartCommand output below).
    gateway_watchdog_state_machine = sfn.StateMachine(
        self, "LoRaWANGatewayWatchdogStatemachine",
        definition=lambda_invoke_state,
        state_machine_name="LoRaWANGatewayWatchdogStatemachine")

    ####################################################################################
    # CloudFormation Stack outputs
    cdk.CfnOutput(
        self, "StateMachineARN",
        value=gateway_watchdog_state_machine.state_machine_arn,
        description=
        "Please run 'aws stepfunctions start-execution --state-machine-arn <LorawanConnectivityWatchdogStack.StateMachineARN>' to start the monitoring of LoRaWAN gateway connectivity",
    )
    cdk.CfnOutput(
        self, "StateMachineStartCommand",
        value='aws stepfunctions start-execution --state-machine-arn ' +
        gateway_watchdog_state_machine.state_machine_arn,
        description=
        "Please run this command to start the monitoring of LoRaWAN gateway connectivity",
    )
    # NOTE(review): "StateMachineStopommand" is missing a 'C'; fixing it
    # would change this output's logical id, so it is kept as-is.
    cdk.CfnOutput(
        self, "StateMachineStopommand",
        value='aws stepfunctions stop-execution --state-machine-arn ' +
        gateway_watchdog_state_machine.state_machine_arn,
        description=
        "Please run this command to stop the monitoring of LoRaWAN gateway connectivity",
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Provision the Amazon Forecast demo pipeline.

    Creates S3 buckets, an Athena workgroup, an SNS notification topic and
    a Step Functions state machine whose Lambda-backed states build a
    Forecast dataset, import data, train a predictor, export a forecast,
    update Athena resources and — depending on '$.params.PerformDelete' —
    clean up the Forecast resources afterwards. An S3-triggered Lambda
    starts the state machine when a training CSV lands under 'train/'.
    """
    super().__init__(scope, id, **kwargs)

    # ------ Necessary Roles ------
    # IamRole is a project-local construct bundling the IAM roles used below
    # (lambda_role, forecast_role, update_role, trigger_role, ...).
    roles = IamRole(
        self, 'IamRoles'
    )

    # ------ S3 Buckets ------
    # Create Athena bucket (holds query results).
    athena_bucket = _s3.Bucket(self, "AthenaBucket",
                               removal_policy=core.RemovalPolicy.DESTROY
                               )
    # Create Forecast bucket (training data dropped under train/*.csv).
    # NOTE(review): construct id "FoecastBucket" is missing an 'r'; renaming
    # it would replace the bucket on redeploy, so it is kept as-is.
    forecast_bucket = _s3.Bucket(self, "FoecastBucket",
                                 removal_policy=core.RemovalPolicy.DESTROY
                                 )

    # ------ Athena ------
    # Config Athena query result output location.
    workgroup_prop = _athena.CfnWorkGroup.WorkGroupConfigurationProperty(
        result_configuration=_athena.CfnWorkGroup.ResultConfigurationProperty(
            output_location="s3://"+athena_bucket.bucket_name
        )
    )
    # Create Athena workgroup.
    athena_workgroup = _athena.CfnWorkGroup(
        self, 'ForecastGroup',
        name='ForecastGroup',
        recursive_delete_option=True,
        state='ENABLED',
        work_group_configuration=workgroup_prop
    )

    # ------ SNS Topic ------
    topic = sns.Topic(
        self, 'NotificationTopic',
        display_name='StepsTopic'
    )
    # SNS email subscription. Get the email address from context value
    # (cdk.json).
    topic.add_subscription(subs.EmailSubscription(self.node.try_get_context('my_email')))

    # ------ Layers ------
    # Shared code layer used by every pipeline Lambda.
    shared_layer = _lambda.LayerVersion(
        self, 'LambdaLayer',
        layer_version_name='testfolderlayer',
        code=_lambda.AssetCode('shared/')
    )

    # ------ Lambdas for stepfuctions ------
    create_dataset_lambda = _lambda.Function(
        self, 'CreateDataset',
        function_name='CreateDataset',
        code=_lambda.Code.asset('lambdas/createdataset/'),
        handler='dataset.lambda_handler',
        runtime=_lambda.Runtime.PYTHON_3_7,
        role=roles.lambda_role,
        timeout=core.Duration.seconds(30),
        layers=[shared_layer]
    )
    create_dataset_group_lambda = _lambda.Function(
        self, 'CreateDatasetGroup',
        function_name='CreateDatasetGroup',
        code=_lambda.Code.asset('lambdas/createdatasetgroup/'),
        handler='datasetgroup.lambda_handler',
        runtime=_lambda.Runtime.PYTHON_3_7,
        role=roles.lambda_role,
        layers=[shared_layer]
    )
    import_data_lambda = _lambda.Function(
        self, 'CreateDatasetImportJob',
        function_name='CreateDatasetImportJob',
        code=_lambda.Code.asset('lambdas/createdatasetimportjob/'),
        handler='datasetimport.lambda_handler',
        runtime=_lambda.Runtime.PYTHON_3_7,
        role=roles.lambda_role,
        # Forecast assumes this role to read the import data from S3.
        environment={
            'FORECAST_ROLE': roles.forecast_role.role_arn
        },
        layers=[shared_layer]
    )
    create_predictor_lambda = _lambda.Function(
        self, 'CreatePredictor',
        function_name='CreatePredictor',
        code=_lambda.Code.asset('lambdas/createpredictor/'),
        handler='predictor.lambda_handler',
        runtime=_lambda.Runtime.PYTHON_3_7,
        role=roles.lambda_role,
        layers=[shared_layer]
    )
    create_forecast_lambda = _lambda.Function(
        self, 'CreateForecast',
        function_name='CreateForecast',
        code=_lambda.Code.asset('lambdas/createforecast/'),
        handler='forecast.lambda_handler',
        runtime=_lambda.Runtime.PYTHON_3_7,
        role=roles.lambda_role,
        # Forecast assumes this role to export results to S3.
        environment={
            'EXPORT_ROLE': roles.forecast_role.role_arn
        },
        layers=[shared_layer],
        timeout=core.Duration.seconds(30)
    )
    # Deploy lambda with python dependencies from requirements.txt.
    update_resources_lambda = _lambda_python.PythonFunction(
        self, 'UpdateResources',
        function_name='UpdateResources',
        entry='lambdas/updateresources/',
        index='update.py',
        handler='lambda_handler',
        runtime=_lambda.Runtime.PYTHON_3_7,
        role=roles.update_role,
        environment={
            'ATHENA_WORKGROUP': athena_workgroup.name,
            'ATHENA_BUCKET': athena_bucket.bucket_name
        },
        layers=[shared_layer],
        timeout=core.Duration.seconds(900)
    )
    # Publishes success/failure messages to the SNS topic above.
    notify_lambda = _lambda.Function(
        self, 'NotifyTopic',
        function_name='NotifyTopic',
        code=_lambda.Code.asset('lambdas/notify/'),
        handler='notify.lambda_handler',
        runtime=_lambda.Runtime.PYTHON_3_7,
        role=roles.lambda_role,
        environment={
            'SNS_TOPIC_ARN': topic.topic_arn
        },
        layers=[shared_layer]
    )
    delete_forecast_lambda = _lambda.Function(
        self, 'DeleteForecast',
        function_name='DeleteForecast',
        code=_lambda.Code.asset('lambdas/deleteforecast/'),
        handler='deleteforecast.lambda_handler',
        runtime=_lambda.Runtime.PYTHON_3_7,
        role=roles.lambda_role,
        layers=[shared_layer]
    )
    # (sic: local name misspells "predictor"; kept to leave code unchanged)
    delete_predctor_lambda = _lambda.Function(
        self, 'DeletePredictor',
        function_name='DeletePredictor',
        code=_lambda.Code.asset('lambdas/deletepredictor/'),
        handler='deletepredictor.lambda_handler',
        runtime=_lambda.Runtime.PYTHON_3_7,
        role=roles.lambda_role,
        layers=[shared_layer]
    )
    delete_importjob_lambda = _lambda.Function(
        self, 'DeleteImportJob',
        function_name='DeleteImportJob',
        code=_lambda.Code.asset('lambdas/deletedatasetimport/'),
        handler='deletedataset.lambda_handler',
        runtime=_lambda.Runtime.PYTHON_3_7,
        role=roles.lambda_role,
        layers=[shared_layer]
    )

    # ------ StepFunctions ------
    # Choice re-entered after a failure notification: decides whether the
    # cleanup branch should still run.
    strategy_choice = sfn.Choice(
        self, 'Strategy-Choice'
    )
    success_state = sfn.Succeed(
        self, 'SuccessState'
    )
    # Failure handler: notify via SNS (result discarded), then re-enter the
    # strategy choice.
    failed = sfn_tasks.LambdaInvoke(
        self, 'Failed',
        lambda_function = notify_lambda,
        result_path=None
    ).next(strategy_choice)
    create_dataset_job = sfn_tasks.LambdaInvoke(
        self, 'Create-Dataset',
        lambda_function = create_dataset_lambda,
        retry_on_service_exceptions=True,
        payload_response_only=True
    )
    # add_retry_n_catch / delete_retry are helper methods defined elsewhere
    # on this class; they attach the shared retry/catch policy to each task.
    self.add_retry_n_catch(create_dataset_job, failed)
    create_dataset_group_job = sfn_tasks.LambdaInvoke(
        self, 'Create-DatasetGroup',
        lambda_function = create_dataset_group_lambda,
        payload_response_only=True
    )
    self.add_retry_n_catch(create_dataset_group_job, failed)
    import_data_job = sfn_tasks.LambdaInvoke(
        self, 'Import-Data',
        lambda_function = import_data_lambda,
        payload_response_only=True
    )
    self.add_retry_n_catch(import_data_job, failed)
    create_predictor_job = sfn_tasks.LambdaInvoke(
        self, 'Create-Predictor',
        lambda_function = create_predictor_lambda,
        payload_response_only=True
    )
    self.add_retry_n_catch(create_predictor_job, failed)
    create_forecast_job = sfn_tasks.LambdaInvoke(
        self, 'Create-Forecast',
        lambda_function = create_forecast_lambda,
        payload_response_only=True
    )
    self.add_retry_n_catch(create_forecast_job, failed)
    update_resources_job = sfn_tasks.LambdaInvoke(
        self, 'Update-Resources',
        lambda_function = update_resources_lambda,
        payload_response_only=True
    )
    self.add_retry_n_catch(update_resources_job, failed)
    notify_success = sfn_tasks.LambdaInvoke(
        self, 'Notify-Success',
        lambda_function = notify_lambda,
        payload_response_only=True
    )
    delete_forecast_job = sfn_tasks.LambdaInvoke(
        self, 'Delete-Forecast',
        lambda_function = delete_forecast_lambda,
        payload_response_only=True
    )
    self.delete_retry(delete_forecast_job)
    delete_predictor_job = sfn_tasks.LambdaInvoke(
        self, 'Delete-Predictor',
        lambda_function = delete_predctor_lambda,
        payload_response_only=True
    )
    self.delete_retry(delete_predictor_job)
    delete_import_job = sfn_tasks.LambdaInvoke(
        self, 'Delete-ImportJob',
        lambda_function = delete_importjob_lambda,
        payload_response_only=True
    )
    self.delete_retry(delete_import_job)

    # Happy path, then the PerformDelete choice: False -> succeed without
    # cleanup; otherwise run the delete branch to completion.
    definition = create_dataset_job\
        .next(create_dataset_group_job)\
        .next(import_data_job)\
        .next(create_predictor_job)\
        .next(create_forecast_job)\
        .next(update_resources_job)\
        .next(notify_success)\
        .next(strategy_choice.when(sfn.Condition.boolean_equals('$.params.PerformDelete', False), success_state)\
        .otherwise(delete_forecast_job).afterwards())\
        .next(delete_predictor_job)\
        .next(delete_import_job)
    deployt_state_machine = sfn.StateMachine(
        self, 'StateMachine',
        definition = definition
    )

    # S3 event trigger lambda: starts the state machine when a training CSV
    # lands under train/ in the forecast bucket.
    s3_lambda = _lambda.Function(
        self, 'S3Lambda',
        function_name='S3Lambda',
        code=_lambda.Code.asset('lambdas/s3lambda/'),
        handler='parse.lambda_handler',
        runtime=_lambda.Runtime.PYTHON_3_7,
        role=roles.trigger_role,
        environment= {
            'STEP_FUNCTIONS_ARN': deployt_state_machine.state_machine_arn,
            'PARAMS_FILE': self.node.try_get_context('parameter_file')
        }
    )
    s3_lambda.add_event_source(
        event_src.S3EventSource(
            bucket=forecast_bucket,
            events=[_s3.EventType.OBJECT_CREATED],
            filters=[_s3.NotificationKeyFilter(
                prefix='train/',
                suffix='.csv'
            )]
        )
    )

    # CloudFormation output
    core.CfnOutput(
        self, 'StepFunctionsName',
        description='Step Functions Name',
        value=deployt_state_machine.state_machine_name
    )
    core.CfnOutput(
        self, 'ForecastBucketName',
        description='Forecast bucket name to drop you files',
        value=forecast_bucket.bucket_name
    )
    core.CfnOutput(
        self, 'AthenaBucketName',
        description='Athena bucket name to drop your files',
        value=athena_bucket.bucket_name
    )
def __init__(self, scope, id, *args, **kwargs):
    """Hello-workflow stack.

    Provisions three S3 buckets (source, processing, destination), five
    Lambda handlers bundled from DIST_PATH, and a Step Functions state
    machine that generates workflow input, checks readiness, fans a
    string-replace step out over a Map state, aggregates earnings and
    finally converts the result from CSV to JSON.
    """
    super().__init__(scope, id, *args, **kwargs)

    # Buckets
    source_bucket = s3.Bucket(self, "SourceBucket")
    dest_bucket = s3.Bucket(self, "DestinationBucket")
    processing_bucket = s3.Bucket(self, "ProcessingBucket")

    # Lambda Functions — all handlers are bundled from DIST_PATH
    # (module-level constant defined elsewhere in this file).
    generate_workflow_input_lambda = aws_lambda.Function(
        self, "GenerateWorkflowInputFunction",
        code=aws_lambda.Code.from_asset(str(DIST_PATH)),
        runtime=aws_lambda.Runtime.PYTHON_3_8,
        handler="generate_workflow_input.lambda_handler",
        # The handler learns the bucket names via environment variables.
        environment={
            "InputBucketName": source_bucket.bucket_name,
            "ProcessingBucketName": processing_bucket.bucket_name,
            "OutputBucketName": dest_bucket.bucket_name
        }
    )
    check_workflow_ready_lambda = aws_lambda.Function(
        self, "CheckWorkflowReadyFunction",
        code=aws_lambda.Code.from_asset(str(DIST_PATH)),
        runtime=aws_lambda.Runtime.PYTHON_3_8,
        handler="check_workflow_ready.lambda_handler"
    )
    string_replace_lambda = aws_lambda.Function(
        self, "StringReplaceFunction",
        code=aws_lambda.Code.from_asset(str(DIST_PATH)),
        runtime=aws_lambda.Runtime.PYTHON_3_8,
        handler="string_replace.lambda_handler"
    )
    calculate_total_earnings_lambda = aws_lambda.Function(
        self, "CalculateTotalEarningsFunction",
        code=aws_lambda.Code.from_asset(str(DIST_PATH)),
        runtime=aws_lambda.Runtime.PYTHON_3_8,
        handler="calculate_total_earnings.lambda_handler"
    )
    convert_csv_to_json_lambda = aws_lambda.Function(
        self, "ConvertCsvToJsonFunction",
        code=aws_lambda.Code.from_asset(str(DIST_PATH)),
        runtime=aws_lambda.Runtime.PYTHON_3_8,
        handler="convert_csv_to_json.lambda_handler"
    )

    # Permissions — least-privilege bucket grants per pipeline stage.
    source_bucket.grant_read(check_workflow_ready_lambda)
    source_bucket.grant_read(string_replace_lambda)
    processing_bucket.grant_write(string_replace_lambda)
    processing_bucket.grant_read_write(calculate_total_earnings_lambda)
    processing_bucket.grant_read(convert_csv_to_json_lambda)
    dest_bucket.grant_write(convert_csv_to_json_lambda)

    # Outputs
    core.CfnOutput(self, "SourceBucketName", value=source_bucket.bucket_name)
    core.CfnOutput(self, "DestinationBucketName", value=dest_bucket.bucket_name)
    core.CfnOutput(self, "ProcessingBucketName", value=processing_bucket.bucket_name)
    core.CfnOutput(self, "GenerateWorkflowInputLambda", value=generate_workflow_input_lambda.function_name)
    core.CfnOutput(self, "CheckWorkflowReadyLambda", value=check_workflow_ready_lambda.function_name)
    core.CfnOutput(self, "StringReplaceLambda", value=string_replace_lambda.function_name)
    core.CfnOutput(self, "CalculateTotalEarningsLambda", value=calculate_total_earnings_lambda.function_name)
    core.CfnOutput(self, "ConvertCsvToJsonLambda", value=convert_csv_to_json_lambda.function_name)

    # State Machine — payload_response_only=True unwraps the Lambda
    # invocation envelope so states see the handler's return value directly.
    generate_workflow_input_task = sf_tasks.LambdaInvoke(
        self, "GenerateWorkflowInput",
        lambda_function=generate_workflow_input_lambda,
        payload_response_only=True
    )
    check_workflow_ready_task = sf_tasks.LambdaInvoke(
        self, "CheckWorkflowReady",
        lambda_function=check_workflow_ready_lambda,
        input_path="$.CheckWorkflowReady.Input",
        result_path="$.CheckWorkflowReady.Output",
        payload_response_only=True
    )
    string_replace_task = sf_tasks.LambdaInvoke(
        self, "ReplaceString",
        lambda_function=string_replace_lambda,
        result_path="$.StringReplace.Output",
        payload_response_only=True
    )
    calculate_total_earnings_task = sf_tasks.LambdaInvoke(
        self, "CalculateTotalEarnings",
        lambda_function=calculate_total_earnings_lambda,
        input_path="$.CalculateTotalEarnings.Input",
        result_path="$.CalculateTotalEarnings.Output",
        payload_response_only=True
    )
    convert_csv_to_json_task = sf_tasks.LambdaInvoke(
        self, "ConvertCsvToJson",
        lambda_function=convert_csv_to_json_lambda,
        input_path="$.ConvertCsvToJson.Input",
        result_path="$.ConvertCsvToJson.Output",
        payload_response_only=True
    )
    end_task = sf.Succeed(self, "WorkflowEnd")

    # Map state: run the string-replace task once per item in the input list.
    replace_string_parallel = sf.Map(
        self, "ReplaceStringParallel",
        items_path="$.StringReplace.Input",
        result_path="$.StringReplace.Output"
    ).iterator(string_replace_task)
    workflow_steps = sf.Chain.\
        start(replace_string_parallel)\
        .next(calculate_total_earnings_task)\
        .next(convert_csv_to_json_task)\
        .next(end_task)
    # Only run the main workflow when CheckWorkflowReady reported True;
    # otherwise end immediately.
    run_workflow = sf.Choice(self, "RunWorkflowDecision")\
        .when(sf.Condition.boolean_equals("$.CheckWorkflowReady.Output", True), workflow_steps)\
        .otherwise(end_task)
    hello_workflow_state_machine = sf.StateMachine(
        self, "HelloWorkflowStateMachine",
        definition=sf.Chain\
        .start(generate_workflow_input_task)\
        .next(check_workflow_ready_task)\
        .next(run_workflow)
    )
def __init__(self, scope: core.Construct, id: str, redshift_cluster_name: str, user_secret: Secret) -> None:
    """Schedule a Lambda-driven Redshift ETL loader.

    Builds the loader Lambda (with a pip-installed dependency layer and a
    narrowly-scoped IAM role for the Redshift Data API), wraps it in a
    submit/wait/poll Step Functions loop, and triggers the state machine
    every 30 minutes via an EventBridge rule.

    :param redshift_cluster_name: target cluster for the Redshift Data API.
    :param user_secret: Secrets Manager secret with the ETL user's credentials.
    """
    super().__init__(scope, id)
    stack = Stack.of(self)

    # NOTE(review): this runs at synth time on the developer/CI machine —
    # it needs pip plus network access and re-installs on every synth.
    subprocess.call(
        ['pip', 'install', '-t', 'dwh/dwh_loader_layer/python/lib/python3.8/site-packages', '-r',
         'dwh/dwh_loader/requirements.txt', '--platform', 'manylinux1_x86_64', '--only-binary=:all:', '--upgrade'])

    # Layer with the loader's Python dependencies (installed above).
    requirements_layer = _lambda.LayerVersion(scope=self,
                                              id='PythonRequirementsTemplate',
                                              code=_lambda.Code.from_asset('dwh/dwh_loader_layer'),
                                              compatible_runtimes=[_lambda.Runtime.PYTHON_3_8])

    # Execution role: basic logging plus Redshift Data API, the user secret,
    # and temporary cluster credentials for the ETL user only.
    dwh_loader_role = _iam.Role(
        self, 'Role',
        assumed_by=_iam.ServicePrincipal('lambda.amazonaws.com')
    )
    dwh_loader_role.add_managed_policy(_iam.ManagedPolicy.from_aws_managed_policy_name(
        'service-role/AWSLambdaBasicExecutionRole'
    ))
    dwh_loader_role.attach_inline_policy(
        _iam.Policy(
            self, 'InlinePolicy',
            statements=[
                _iam.PolicyStatement(
                    actions=[
                        "redshift-data:ExecuteStatement",
                        "redshift-data:CancelStatement",
                        "redshift-data:ListStatements",
                        "redshift-data:GetStatementResult",
                        "redshift-data:DescribeStatement",
                        "redshift-data:ListDatabases",
                        "redshift-data:ListSchemas",
                        "redshift-data:ListTables",
                        "redshift-data:DescribeTable"
                    ],
                    resources=['*']
                ),
                _iam.PolicyStatement(
                    actions=["secretsmanager:GetSecretValue"],
                    resources=[user_secret.secret_arn]
                ),
                _iam.PolicyStatement(
                    actions=["redshift:GetClusterCredentials"],
                    resources=[
                        "arn:aws:redshift:*:*:dbname:*/*",
                        "arn:aws:redshift:*:*:dbuser:*/"+_config.Redshift.ETL_USER
                    ]
                ),
                # Explicitly deny creating the ETL user via credentials auto-create.
                _iam.PolicyStatement(
                    effect=_iam.Effect('DENY'),
                    actions=["redshift:CreateClusterUser"],
                    resources=["arn:aws:redshift:*:*:dbuser:*/"+_config.Redshift.ETL_USER]
                ),
                # Allow creating the Redshift Data service-linked role if absent.
                _iam.PolicyStatement(
                    conditions={
                        'StringLike': {
                            "iam:AWSServiceName": "redshift-data.amazonaws.com"
                        }
                    },
                    actions=["iam:CreateServiceLinkedRole"],
                    resources=["arn:aws:iam::*:role/aws-service-role/redshift-data.amazonaws.com/AWSServiceRoleForRedshift"]
                ),
            ]
        )
    )

    # Loader Lambda: submits the stored procedure and polls its status,
    # driven by the state machine below.
    dwh_loader_function = _lambda.Function(
        self, 'Lambda',
        runtime=_lambda.Runtime.PYTHON_3_8,
        code=_lambda.Code.from_asset('dwh/dwh_loader'),
        handler='dwh_loader.handler',
        function_name='dwh-loader',
        environment={
            'CLUSTER_NAME': redshift_cluster_name,
            'PROCEDURE': _config.Redshift.ETL_PROCEDURE,
            'SECRET_ARN': user_secret.secret_arn,
            'DATABASE': _config.Redshift.DATABASE,
            'REGION': core.Aws.REGION,
            'SCHEMA': _config.Redshift.SCHEMA
        },
        layers=[requirements_layer],
        timeout=core.Duration.seconds(30),
        role=dwh_loader_role
    )

    # Submit -> Wait 30s -> Status -> Choice (FAILED / FINISHED / keep waiting).
    dwh_loader_submit = _sfn_tasks.LambdaInvoke(
        self, 'Submit',
        lambda_function=dwh_loader_function,
        payload_response_only=True
    )
    dwh_loader_wait = _sfn.Wait(
        self, 'Wait',
        time=_sfn.WaitTime.duration(core.Duration.seconds(30))
    )
    dwh_loader_complete = _sfn.Choice(
        self, 'Complete'
    )
    # NOTE(review): Fail's `error` is a static string in the states language —
    # "$.Result.Error" is NOT resolved as a JSONPath here; confirm intent.
    dwh_loader_failed = _sfn.Fail(
        self, 'Fail',
        cause="Redshift Data API statement failed",
        error="$.Result.Error"
    )
    dwh_loader_status = _sfn_tasks.LambdaInvoke(
        self, 'Status',
        lambda_function=dwh_loader_function,
        result_path='$.Result',
        payload_response_only=True
    )

    definition = dwh_loader_submit \
        .next(dwh_loader_wait) \
        .next(dwh_loader_status) \
        .next(dwh_loader_complete
              .when(_sfn.Condition.string_equals('$.Result.Status', 'FAILED'), dwh_loader_failed)
              .when(_sfn.Condition.string_equals('$.Result.Status', 'FINISHED'), _sfn.Succeed(self, 'DwhLoaderSuccess'))
              .otherwise(dwh_loader_wait))

    dwh_loader_stepfunctions = _sfn.StateMachine(
        self, 'StepFunctions',
        definition=definition,
        timeout=core.Duration.minutes(30)
    )

    # Kick the loader off every 30 minutes.
    step_trigger = _events.Rule(
        self, 'StepTrigger',
        schedule=_events.Schedule.cron(minute='0/30',
                                       hour='*',
                                       month='*',
                                       week_day='*',
                                       year='*')
    )
    step_trigger.add_target(
        _events_targets.SfnStateMachine(
            machine=dwh_loader_stepfunctions,
        )
    )
def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
    """Submit-and-poll job workflow.

    Submits a job via one Lambda, then repeatedly waits (for a
    caller-supplied number of seconds) and polls its status via a second
    Lambda until the job reports SUCCEEDED or FAILED.
    """
    super().__init__(scope, construct_id, **kwargs)

    # Both handlers live in the same local 'lambdas' directory.
    handlers_dir = os.path.join(os.getcwd(), 'lambdas')
    submit_lambda = PythonFunction(self, 'Submit',
                                   handler='handler',
                                   index='submit.py',
                                   entry=handlers_dir,
                                   runtime=lambda_.Runtime.PYTHON_3_8)
    get_status_lambda = PythonFunction(self, 'Status',
                                       handler='handler',
                                       index='status.py',
                                       entry=handlers_dir,
                                       runtime=lambda_.Runtime.PYTHON_3_8)

    # Each task unwraps the Lambda response envelope via output_path.
    submit_job = tasks.LambdaInvoke(
        self, "Submit Job",
        lambda_function=submit_lambda,
        output_path="$.Payload")
    get_status = tasks.LambdaInvoke(
        self, "Get Job Status",
        lambda_function=get_status_lambda,
        output_path="$.Payload")
    final_status = tasks.LambdaInvoke(
        self, "Get Final Job Status",
        lambda_function=get_status_lambda,
        output_path="$.Payload")

    # Poll interval is taken from the execution input ($.waitSeconds).
    wait_x = sfn.Wait(self, "Wait X Seconds",
                      time=sfn.WaitTime.seconds_path("$.waitSeconds"))
    job_failed = sfn.Fail(self, "Job Failed",
                          cause="AWS Batch Job Failed",
                          error="DescribeJob returned FAILED")

    # Terminal routing: FAILED -> Fail state, SUCCEEDED -> one last status
    # fetch, anything else -> wait and poll again.
    outcome = sfn.Choice(self, "Job Complete?")
    outcome.when(sfn.Condition.string_equals("$.status", "FAILED"), job_failed)
    outcome.when(sfn.Condition.string_equals("$.status", "SUCCEEDED"), final_status)
    outcome.otherwise(wait_x)

    definition = submit_job.next(wait_x).next(get_status).next(outcome)
    sfn.StateMachine(self, "StateMachine",
                     definition=definition,
                     timeout=cdk.Duration.minutes(5))
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Submit-and-poll workflow backed by three handlers in ./lambdas/example.

    Submits a job, waits the number of seconds given in the execution
    input, polls the status, and either fails, fetches a final status, or
    loops back to waiting.
    """
    super().__init__(scope, id, **kwargs)

    # All three handlers are bundled from the same entry directory.
    handlers_entry = "./lambdas/example"
    submit_lambda = aws_lambda_python.PythonFunction(
        self, "submit-status",
        entry=handlers_entry,
        handler="submit_status",
    )
    get_status_lambda = aws_lambda_python.PythonFunction(
        self, "get-status",
        entry=handlers_entry,
        handler="get_status")
    final_status_lambda = aws_lambda_python.PythonFunction(
        self, "final-status",
        entry=handlers_entry,
        handler="final_status")

    # payload_response_only unwraps the Lambda invocation envelope.
    submit_job = tasks.LambdaInvoke(
        self, "Submit Job",
        lambda_function=submit_lambda,
        payload_response_only=True,
    )
    get_status = tasks.LambdaInvoke(
        self, "Get Job Status",
        lambda_function=get_status_lambda,
        payload_response_only=True,
    )
    final_status = tasks.LambdaInvoke(
        self, "Get Final Job Status",
        lambda_function=final_status_lambda,
        payload_response_only=True,
    )

    # Poll interval comes from the execution input ($.seconds).
    wait_x = sfn.Wait(self, "Wait X Seconds",
                      time=sfn.WaitTime.seconds_path("$.seconds"))
    job_failed = sfn.Fail(
        self, "Job Failed",
        cause="AWS Batch Job Failed",
        error="DescribeJob returned FAILED",
    )

    # FAILED -> Fail state; SUCCEEDED -> final status fetch; else keep polling.
    verdict = sfn.Choice(self, "Job Complete?")
    verdict.when(sfn.Condition.string_equals("$.status", "FAILED"), job_failed)
    verdict.when(sfn.Condition.string_equals("$.status", "SUCCEEDED"), final_status)
    verdict.otherwise(wait_x)

    definition = submit_job.next(wait_x).next(get_status).next(verdict)
    sfn.StateMachine(
        self, "StateMachine",
        definition=definition,
        timeout=core.Duration.minutes(5),
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Human-approval workflow.

    A submit Lambda e-mails an approval link (SNS + SES); the state machine
    pauses on a task token until the approver hits the API Gateway endpoint,
    whose Lambda resolves the token. The execution then succeeds or fails
    based on '$.status'.
    """
    super().__init__(scope, id, **kwargs)

    # REST endpoint the approver clicks; backed by the approval Lambda below.
    api = apigateway.RestApi(
        scope=self,
        id=f'{constants.PREFIX}-approval-api',
        rest_api_name='Human approval endpoint',
        description='HTTP Endpoint backed by API Gateway and Lambda',
        endpoint_types=[apigateway.EndpointType.REGIONAL],
    )
    v1 = api.root.add_resource("v1")
    approve_api = v1.add_resource("approve")

    #################################################
    # Topic that delivers the approval request e-mail.
    email_topic = sns.Topic(
        scope=self,
        id=f'{constants.PREFIX}-email-topic',
    )
    email_topic.add_subscription(
        subscription=subscriptions.EmailSubscription(
            email_address=constants.EMAIL_APPROVER,
        ))

    #################################################
    # Submit Lambda: publishes to the topic / sends the approval e-mail;
    # receives the task token and approval endpoint via its payload/env.
    submit_job_lambda = _lambda.Function(
        scope=self,
        id=f'{constants.PREFIX}-submit-lambda',
        runtime=_lambda.Runtime.PYTHON_3_8,
        handler='submit.handler',
        environment={
            "TOPIC_ARN": email_topic.topic_arn,
            "END_POINT": approve_api.url,
            "TO_ADDRESS": constants.EMAIL_RECIPIENT,
            "FROM_ADDRESS": constants.EMAIL_SENDER,
        },
        code=_lambda.Code.from_asset(
            os.path.join('lambdas', 'submit-lambda')),
    )
    email_topic.grant_publish(submit_job_lambda)
    # NOTE(review): 'ses:Send*' on resources=['*'] is broad; consider
    # scoping to the verified identities if possible.
    submit_job_lambda.add_to_role_policy(statement=iam.PolicyStatement(
        actions=['ses:Send*'],
        resources=['*'],
    ))

    # WAIT_FOR_TASK_TOKEN pauses this execution until the approval Lambda
    # calls SendTaskSuccess/SendTaskFailure with the token; the 5-minute
    # heartbeat bounds how long the task may sit without a heartbeat.
    submit_job = tasks.LambdaInvoke(
        scope=self,
        id=f'{constants.PREFIX}-submit-job',
        lambda_function=submit_job_lambda,
        integration_pattern=sfn.IntegrationPattern.WAIT_FOR_TASK_TOKEN,
        heartbeat=core.Duration.minutes(5),
        payload=sfn.TaskInput.from_object({
            "token": sfn.Context.task_token,
            "data": sfn.Data.string_at('$'),
        }),
    )

    # Terminal states and the approval decision.
    success = sfn.Succeed(scope=self,
                          id=f'{constants.PREFIX}-success',
                          comment='We did it!')
    fail = sfn.Fail(scope=self,
                    id=f'{constants.PREFIX}-fail',
                    error='WorkflowFailure',
                    cause='Something went wrong')
    choice = sfn.Choice(scope=self,
                        id=f'{constants.PREFIX}-choice',
                        comment='Was it approved?')
    choice.when(condition=sfn.Condition.string_equals("$.status", "OK"),
                next=success)
    choice.otherwise(fail)

    definition = submit_job.next(choice)
    self._state_machine = sfn.StateMachine(
        scope=self,
        id=f'{constants.PREFIX}-state-machine',
        definition=definition,
        # only 10 mins to approve better be quick
        timeout=core.Duration.minutes(10))

    #################################################
    # Approval Lambda behind GET /v1/approve: resolves the task token.
    approval_lambda = _lambda.Function(
        scope=self,
        id=f'{constants.PREFIX}-approval-lambda',
        runtime=_lambda.Runtime.PYTHON_3_8,
        handler='approve.handler',
        code=_lambda.Code.from_asset(
            os.path.join('lambdas', 'approve-lambda')),
    )
    # NOTE(review): 'states:Send*' on resources=['*'] could be scoped to
    # this state machine's executions.
    approval_lambda.add_to_role_policy(statement=iam.PolicyStatement(
        actions=['states:Send*'],
        resources=['*']))

    approve_integration = apigateway.LambdaIntegration(approval_lambda)
    approve_api_get_method = approve_api.add_method(
        http_method="GET",
        api_key_required=False,
        integration=approve_integration,
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Demo stack: three worker Lambdas in an isolated VPC wired into a
    linear Step Functions workflow (submit -> job1 -> wait 60s -> job2 ->
    succeed).

    Fixes a copy-paste bug: the "Job_2" task previously invoked fn_job_1,
    leaving fn_job_2 deployed but never used.
    """
    super().__init__(scope, id, **kwargs)

    # Isolated VPC (no NAT gateways) hosting every workflow Lambda.
    self.vpc = aws_ec2.Vpc(self, "demo-stepfunctions",
                           cidr="10.100.0.0/16",
                           max_azs=2,
                           nat_gateways=0,
                           subnet_configuration=[
                               aws_ec2.SubnetConfiguration(
                                   name='demo-stepfunctions',
                                   subnet_type=aws_ec2.SubnetType.ISOLATED,
                                   cidr_mask=24)
                           ])

    # Shared execution role: ENI management (required for VPC Lambdas)
    # plus basic CloudWatch logging.
    lambda_role = iam.Role(
        self, 'demo-lambda-role',
        assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"))
    lambda_role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'service-role/AWSLambdaENIManagementAccess'))
    lambda_role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'service-role/AWSLambdaBasicExecutionRole'))

    def _worker_function(name: str, asset_path: str) -> lambda_.Function:
        # The three workflow Lambdas differ only in name and code asset;
        # everything else (runtime, role, VPC placement, timeout) is shared.
        return lambda_.Function(
            self, name,
            function_name=name,
            handler='handler.do',
            runtime=lambda_.Runtime.PYTHON_3_8,
            code=lambda_.Code.asset(asset_path),
            role=lambda_role,
            timeout=core.Duration.seconds(900),
            allow_public_subnet=False,
            vpc=self.vpc,
            vpc_subnets=aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.ISOLATED),
            environment={})

    fn_submit = _worker_function('demo-sfn-submit', './craftaws/func_submit')
    fn_job_1 = _worker_function('demo-sfn-job1', './craftaws/func_job_1')
    fn_job_2 = _worker_function('demo-sfn-job2', './craftaws/func_job_2')

    # Each task unwraps the Lambda response envelope via output_path.
    submit_job = tasks.LambdaInvoke(
        self, "Submit Job",
        lambda_function=fn_submit,
        output_path="$.Payload")
    step_1_job = tasks.LambdaInvoke(
        self, "Job_1",
        lambda_function=fn_job_1,
        output_path="$.Payload")
    wait_x = sfn.Wait(self, "Wait X Seconds",
                      time=sfn.WaitTime.duration(
                          core.Duration.seconds(60)))
    # BUG FIX: previously invoked fn_job_1 (copy-paste); now runs fn_job_2.
    step_2_job = tasks.LambdaInvoke(
        self, "Job_2",
        lambda_function=fn_job_2,
        output_path="$.Payload")
    job_succeed = sfn.Succeed(self, "Job Succeed",
                              comment="AWS Batch Job Succeed")

    definition = submit_job.next(step_1_job).next(wait_x).next(
        step_2_job).next(job_succeed)
    sfn.StateMachine(self, "StateMachine",
                     definition=definition,
                     timeout=core.Duration.minutes(5))
def __init__(self, app: App, id: str, **kwargs) -> None:
    """Submit-and-poll demo state machine.

    Submits a job, waits 30 seconds, polls its status, and routes to a
    Succeed or Fail terminal state (or back to the wait) based on
    '$.status'.
    """
    super().__init__(app, id, **kwargs)

    # Lambda Handlers Definitions
    submit_lambda = _lambda.Function(
        self, 'submitLambda',
        handler='lambda_function.lambda_handler',
        runtime=_lambda.Runtime.PYTHON_3_9,
        code=_lambda.Code.from_asset('lambdas/submit'))
    status_lambda = _lambda.Function(
        self, 'statusLambda',
        handler='lambda_function.lambda_handler',
        runtime=_lambda.Runtime.PYTHON_3_9,
        code=_lambda.Code.from_asset('lambdas/status'))

    # Step functions Definition — output_path unwraps the Lambda envelope.
    submit_job = _aws_stepfunctions_tasks.LambdaInvoke(
        self, "Submit Job",
        lambda_function=submit_lambda,
        output_path="$.Payload",
    )
    status_job = _aws_stepfunctions_tasks.LambdaInvoke(
        self, "Get Status",
        lambda_function=status_lambda,
        output_path="$.Payload",
    )
    wait_job = _aws_stepfunctions.Wait(
        self, "Wait 30 Seconds",
        time=_aws_stepfunctions.WaitTime.duration(Duration.seconds(30)))
    fail_job = _aws_stepfunctions.Fail(
        self, "Fail",
        cause='AWS Batch Job Failed',
        error='DescribeJob returned FAILED')
    succeed_job = _aws_stepfunctions.Succeed(
        self, "Succeeded",
        comment='AWS Batch Job succeeded')

    # Route on the reported status; anything else keeps polling.
    completion_check = _aws_stepfunctions.Choice(self, 'Job Complete?')
    completion_check.when(
        _aws_stepfunctions.Condition.string_equals('$.status', 'FAILED'),
        fail_job)
    completion_check.when(
        _aws_stepfunctions.Condition.string_equals('$.status', 'SUCCEEDED'),
        succeed_job)
    completion_check.otherwise(wait_job)

    # Create Chain
    definition = submit_job.next(wait_job).next(status_job).next(completion_check)

    # Create state machine
    sm = _aws_stepfunctions.StateMachine(
        self, "StateMachine",
        definition=definition,
        timeout=Duration.minutes(5),
    )