def __init__(self, app: core.App, cfn_name: str, stack_env):
    """Stack that chains a Pass state into a Lambda-backed Step Functions task.

    :param app: CDK app the stack is attached to.
    :param cfn_name: base name used to build all construct ids.
    :param stack_env: environment suffix appended to the stack id.
    """
    super().__init__(scope=app, id=f"{cfn_name}-{stack_env}")

    # Lambda function invoked by the state-machine task below.
    task_function = lambda_.Function(
        scope=self,
        id=f"{cfn_name}-lambda-task",
        code=lambda_.AssetCode.from_asset("lambda_script"),
        handler="lambda_handler.lambda_task",
        timeout=core.Duration.seconds(10),
        runtime=self.LAMBDA_PYTHON_RUNTIME,
        memory_size=128,
    )

    # Pass state: injects the literal string "example" at $.source,
    # leaving the rest of the input untouched.
    inject_source = sfn.Pass(
        scope=self,
        id=f"{cfn_name}-sfn-pass",
        comment="pass example",
        input_path="$",
        result_path="$.source",
        result=sfn.Result.from_string("example"),
        output_path="$",
    )

    # Task state: forwards $.time and $.source to the Lambda and keeps
    # only the Lambda's Payload as the state output.
    invoke_lambda = sfn.Task(
        scope=self,
        id=f"{cfn_name}-sfn-lambda-task",
        task=sfn_tasks.RunLambdaTask(
            lambda_function=task_function,
            payload=sfn.TaskInput.from_object({
                "time.$": "$.time",
                "source.$": "$.source",
            }),
        ),
        input_path="$",
        result_path="$.arguments",
        output_path="$.arguments.Payload",
    )

    # Materialize the two-state machine: Pass -> Lambda task.
    _ = sfn.StateMachine(
        scope=self,
        id=f"{cfn_name}-SFn-{stack_env}",
        definition=inject_source.next(invoke_lambda),
    )
def create_state_machine(self, lambda_functions, page_sqs):
    """Build the multi-page PDF A2I processing state machine.

    A Map state fans each entry of ``$.image_keys`` out to an SQS-backed
    analysis task (callback pattern — the execution pauses on a task token
    until the page worker calls back), then a wrap-up Lambda runs once all
    pages are done.

    :param lambda_functions: mapping of name -> lambda.Function; only the
        ``"wrapup"`` entry is used here.
    :param page_sqs: SQS queue consumed by the per-page analysis workers.
    """
    task_wrapup = aws_stepfunctions.Task(
        self, "task_wrapup",
        task=aws_stepfunctions_tasks.RunLambdaTask(lambda_functions["wrapup"])
    )

    # Fixed local-variable typo: was `tast_analyze_with_scale`.
    task_analyze_with_scale = aws_stepfunctions.Task(
        self, "AnalyzeWithScale",
        task=aws_stepfunctions_tasks.SendToQueue(
            queue=page_sqs,
            message_body=aws_stepfunctions.TaskInput.from_object(
                {
                    # Task token lets the page worker resume this execution.
                    "token": aws_stepfunctions.Context.task_token,
                    "id.$": "$.id",
                    "bucket.$": "$.bucket",
                    "original_upload_pdf.$": "$.original_upload_pdf",
                    "SAGEMAKER_WORKFLOW_AUGMENTED_AI_ARN.$":
                        "$.SAGEMAKER_WORKFLOW_AUGMENTED_AI_ARN",
                    "key.$": "$.key"
                }
            ),
            delay=None,
            # Pause the execution until SendTaskSuccess/Failure is called.
            integration_pattern=aws_stepfunctions.ServiceIntegrationPattern.WAIT_FOR_TASK_TOKEN
        )
    )

    # One iteration per image key; iteration output is discarded so the
    # original input flows through to the wrap-up task.
    process_map = aws_stepfunctions.Map(
        self, "Process_Map",
        items_path="$.image_keys",
        result_path="DISCARD",
        parameters={
            "id.$": "$.id",
            "bucket.$": "$.bucket",
            "original_upload_pdf.$": "$.original_upload_pdf",
            "SAGEMAKER_WORKFLOW_AUGMENTED_AI_ARN.$":
                "$.SAGEMAKER_WORKFLOW_AUGMENTED_AI_ARN",
            "key.$": "$$.Map.Item.Value"
        }
    ).iterator(task_analyze_with_scale)

    definition = process_map.next(task_wrapup)

    aws_stepfunctions.StateMachine(
        scope=self,
        id="multipagepdfa2i_fancy_stepfunction",
        state_machine_name="multipagepdfa2i_fancy_stepfunction",
        definition=definition
    )
def build(scope: core.Construct, id: str, *,
          roles: emr_roles.EMRRoles,
          kerberos_attributes_secret: Optional[secretsmanager.Secret] = None,
          secret_configurations: Optional[Dict[str, secretsmanager.Secret]] = None,
          cluster_configuration_path: str = '$.ClusterConfiguration',
          result_path: Optional[str] = None,
          output_path: Optional[str] = None,
          wait_for_cluster_start: bool = True) -> sfn.Task:
    """Build a Step Functions Task that starts an EMR cluster.

    The task invokes a RunJobFlow Lambda with the WAIT_FOR_TASK_TOKEN
    integration pattern; the Lambda receives the full execution input, the
    cluster configuration found at ``cluster_configuration_path``, and the
    name of a CloudWatch Events rule used for periodic status checks.

    :param scope: parent construct for the nested Construct created here.
    :param id: id of the nested Construct (namespaces the Lambda/Task ids).
    :param roles: EMR roles granted to the RunJobFlow Lambda.
    :param kerberos_attributes_secret: optional secret the Lambda may read.
    :param secret_configurations: optional map of secrets the Lambda may read.
    :param cluster_configuration_path: JSONPath of the cluster config in state.
    :param result_path: task result path (None = library default).
    :param output_path: task output path (None = library default).
    :param wait_for_cluster_start: when False, the Lambda is told to
        fire-and-forget instead of waiting for the cluster to start.
    """
    # We use a nested Construct to avoid collisions with Lambda and Task ids
    construct = core.Construct(scope, id)

    # Reuse a single (initially disabled) 1-minute polling rule per stack.
    # NOTE(review): the lookup is on the Stack's node, but the rule is
    # created under the nested `construct` — confirm the child is actually
    # found on subsequent calls rather than re-created each time.
    event_rule = core.Stack.of(scope).node.try_find_child('EventRule')
    if event_rule is None:
        event_rule = events.Rule(construct, 'EventRule',
                                 enabled=False,
                                 schedule=events.Schedule.rate(core.Duration.minutes(1)))
        BaseBuilder.tag_construct(event_rule)

    run_job_flow_lambda = emr_lambdas.RunJobFlowBuilder.get_or_build(construct, roles, event_rule)
    check_cluster_status_lambda = emr_lambdas.CheckClusterStatusBuilder.get_or_build(construct, event_rule)

    # Allow the Lambda to read the Kerberos secret; the trailing wildcard
    # covers the random suffix Secrets Manager appends to secret ARNs.
    if kerberos_attributes_secret:
        run_job_flow_lambda.add_to_role_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=['secretsmanager:GetSecretValue'],
                resources=[f'{kerberos_attributes_secret.secret_arn}*']))

    # Same read access for every secret referenced by the configuration.
    if secret_configurations is not None:
        for secret in secret_configurations.values():
            run_job_flow_lambda.add_to_role_policy(
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=['secretsmanager:GetSecretValue'],
                    resources=[f'{secret.secret_arn}*']))

    return sfn.Task(
        construct, 'Start EMR Cluster (with Secrets)',
        output_path=output_path,
        result_path=result_path,
        task=sfn_tasks.RunLambdaTask(
            run_job_flow_lambda,
            # Execution pauses until the Lambda (or the status checker it
            # schedules) reports back via the task token.
            integration_pattern=sfn.ServiceIntegrationPattern.WAIT_FOR_TASK_TOKEN,
            payload=sfn.TaskInput.from_object({
                'ExecutionInput': sfn.TaskInput.from_context_at('$$.Execution.Input').value,
                'ClusterConfiguration': sfn.TaskInput.from_data_at(cluster_configuration_path).value,
                'TaskToken': sfn.Context.task_token,
                'CheckStatusLambda': check_cluster_status_lambda.function_arn,
                'RuleName': event_rule.rule_name,
                'FireAndForget': not wait_for_cluster_start
            })))
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Saga-pattern trip-booking stack.

    Creates a single DynamoDB table, reserve/confirm/cancel Lambdas for
    flights and hotels plus payment Lambdas, and wires them into a Step
    Functions saga with compensating transactions, fronted by an API
    Gateway + Lambda that starts executions.
    """
    super().__init__(scope, id, **kwargs)

    ###
    # DynamoDB Table
    ###
    # We store Flight, Hotel and Rental Car bookings in the same table.
    #
    # For more help with single table DB structures see - https://www.dynamodbbook.com/
    # pk - the trip_id e.g. 1234
    # sk - bookingtype#booking_id e.g. HOTEL#345634, FLIGHT#574576, PAYMENT#45245
    table = dynamo_db.Table(self, "Bookings",
                            partition_key=dynamo_db.Attribute(
                                name="pk",
                                type=dynamo_db.AttributeType.STRING),
                            sort_key=dynamo_db.Attribute(
                                name="sk",
                                type=dynamo_db.AttributeType.STRING))

    ###
    # Lambda Functions
    ###
    # We need Booking and Cancellation functions for our 3 services
    #
    # All functions need access to our DynamoDB table above.
    # We also need to take payment for this trip
    #
    # 1) Flights
    # 2) Hotel
    # 3) Payment

    # 1) Flights
    reserve_flight_lambda = self.create_lambda(
        scope=self, lambda_id="reserveFlightLambdaHandler",
        handler='flights/reserveFlight.handler', table=table)
    confirm_flight_lambda = self.create_lambda(
        scope=self, lambda_id="confirmFlightLambdaHandler",
        handler='flights/confirmFlight.handler', table=table)
    cancel_flight_lambda = self.create_lambda(
        scope=self, lambda_id="cancelFlightLambdaHandler",
        handler='flights/cancelFlight.handler', table=table)

    # 2) Hotel
    reserve_hotel_lambda = self.create_lambda(
        scope=self, lambda_id="reserveHotelLambdaHandler",
        handler='hotel/reserveHotel.handler', table=table)
    confirm_hotel_lambda = self.create_lambda(
        scope=self, lambda_id="confirmHotelLambdaHandler",
        handler='hotel/confirmHotel.handler', table=table)
    cancel_hotel_lambda = self.create_lambda(
        scope=self, lambda_id="cancelHotelLambdaHandler",
        handler='hotel/cancelHotel.handler', table=table)

    # 3) Payment For Holiday
    take_payment_lambda = self.create_lambda(
        scope=self, lambda_id="takePaymentLambdaHandler",
        handler='payment/takePayment.handler', table=table)
    refund_payment_lambda = self.create_lambda(
        scope=self, lambda_id="refundPaymentLambdaHandler",
        handler='payment/refundPayment.handler', table=table)

    ###
    # Saga Pattern Step Function
    ###
    # Follows a strict order:
    # 1) Reserve Flights and Hotel
    # 2) Take Payment
    # 3) Confirm Flight and Hotel booking

    # Our two end states
    booking_succeeded = step_fn.Succeed(self, 'We have made your booking!')
    booking_failed = step_fn.Fail(self, "Sorry, We Couldn't make the booking")

    # 1) Reserve Flights and Hotel
    # Compensation: retry the cancel task up to 3 times, then end in the
    # Fail state. Note `.next()` returns a Chain, which is what add_catch
    # receives below.
    cancel_hotel_reservation = step_fn.Task(
        self, 'CancelHotelReservation',
        task=step_fn_tasks.RunLambdaTask(cancel_hotel_lambda),
        result_path='$.CancelHotelReservationResult').add_retry(
            max_attempts=3).next(booking_failed)
    reserve_hotel = step_fn.Task(
        self, 'ReserveHotel',
        task=step_fn_tasks.RunLambdaTask(reserve_hotel_lambda),
        result_path='$.ReserveHotelResult').add_catch(
            cancel_hotel_reservation, result_path="$.ReserveHotelError")
    # Flight cancellation chains into hotel cancellation so all earlier
    # reservations are unwound.
    cancel_flight_reservation = step_fn.Task(
        self, 'CancelFlightReservation',
        task=step_fn_tasks.RunLambdaTask(cancel_flight_lambda),
        result_path='$.CancelFlightReservationResult').add_retry(
            max_attempts=3).next(cancel_hotel_reservation)
    reserve_flight = step_fn.Task(
        self, 'ReserveFlight',
        task=step_fn_tasks.RunLambdaTask(reserve_flight_lambda),
        result_path='$.ReserveFlightResult').add_catch(
            cancel_flight_reservation, result_path="$.ReserveFlightError")

    # 2) Take Payment
    # Refund chains into flight cancellation (which chains into hotel
    # cancellation) — a failed payment unwinds everything.
    refund_payment = step_fn.Task(
        self, 'RefundPayment',
        task=step_fn_tasks.RunLambdaTask(refund_payment_lambda),
        result_path='$.RefundPaymentResult').add_retry(
            max_attempts=3).next(cancel_flight_reservation)
    take_payment = step_fn.Task(
        self, 'TakePayment',
        task=step_fn_tasks.RunLambdaTask(take_payment_lambda),
        result_path='$.TakePaymentResult').add_catch(
            refund_payment, result_path="$.TakePaymentError")

    # 3) Confirm Flight and Hotel Booking
    # A failed confirmation also triggers the full refund/cancel chain.
    confirm_hotel = step_fn.Task(
        self, 'ConfirmHotelBooking',
        task=step_fn_tasks.RunLambdaTask(confirm_hotel_lambda),
        result_path='$.ConfirmHotelBookingResult').add_catch(
            refund_payment, result_path="$.ConfirmHotelBookingError")
    confirm_flight = step_fn.Task(
        self, 'ConfirmFlight',
        task=step_fn_tasks.RunLambdaTask(confirm_flight_lambda),
        result_path='$.ConfirmFlightResult').add_catch(
            refund_payment, result_path="$.ConfirmFlightError")

    # Happy path: reserve -> pay -> confirm -> succeed.
    definition = step_fn.Chain \
        .start(reserve_hotel) \
        .next(reserve_flight) \
        .next(take_payment) \
        .next(confirm_hotel) \
        .next(confirm_flight) \
        .next(booking_succeeded)

    saga = step_fn.StateMachine(self, 'BookingSaga',
                                definition=definition,
                                timeout=core.Duration.minutes(5))

    # defines an AWS Lambda resource to connect to our API Gateway and kick
    # off our step function
    saga_lambda = _lambda.Function(
        self, "sagaLambdaHandler",
        runtime=_lambda.Runtime.NODEJS_12_X,
        handler="sagaLambda.handler",
        code=_lambda.Code.from_asset("lambdas"),
        environment={'statemachine_arn': saga.state_machine_arn})
    saga.grant_start_execution(saga_lambda)

    # defines an API Gateway REST API resource backed by our "stateMachineLambda" function.
    api_gw.LambdaRestApi(self, 'SagaPatternSingleTable',
                         handler=saga_lambda)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Ext06 WAF DoS-mitigation stack.

    Creates WAF IP sets and a Web ACL, unban Lambdas driven by 1-hour
    delay state machines, a Firehose delivery stream that lands WAF logs
    in S3, a response Lambda triggered by new log objects, and a one-shot
    scheduled setup Lambda that wires WAF logging to the Firehose stream.
    Requires the ``api_arn`` and ``rate`` context variables.
    """
    super().__init__(scope, id, **kwargs)

    API_ARN = self.node.try_get_context("api_arn")
    RATE = self.node.try_get_context("rate")
    if not API_ARN or not RATE:
        logger.error(
            f"Required context variables for {id} were not provided!")
    else:
        # Create the WAF IPSets (start empty; populated at runtime by the
        # response/unban Lambdas).
        doslist = wafv2.CfnIPSet(
            self, "Ext06DosIpSet",
            addresses=[],
            ip_address_version="IPV4",
            scope="REGIONAL",
            name="Ext06DosIpSet",
        )
        suslist = wafv2.CfnIPSet(
            self, "Ext06SusIpSet",
            addresses=[],
            ip_address_version="IPV4",
            scope="REGIONAL",
            name="Ext06SusIpSet",
        )
        # Create a WAF (default allow; rules added later by the setup Lambda)
        waf = wafv2.CfnWebACL(
            self,
            id="Ext06_WAF",
            name="Ext06-WAF",
            default_action=wafv2.CfnWebACL.DefaultActionProperty(allow={}),
            scope="REGIONAL",
            visibility_config=wafv2.CfnWebACL.VisibilityConfigProperty(
                cloud_watch_metrics_enabled=True,
                metric_name="EXT06_WAF",
                sampled_requests_enabled=True),
            rules=[],
        )
        # Create Susunban lambda
        lambda_dir_path = os.path.join(os.getcwd(), "ir_cdk_stacks", "ext_06")
        susunban_lambda = _lambda.Function(
            self,
            "Ext06ResponseSusUnbanFunction",
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler="susunban_lambda.lambda_handler",
            code=_lambda.Code.from_asset(lambda_dir_path),
            environment={
                "ipset_id": suslist.attr_id,
                "ipset_name": suslist.name,
                "ipset_scope": suslist.scope,
            })
        # Assign WAF permissions to lambda
        susunban_lambda.add_to_role_policy(
            iam.PolicyStatement(
                actions=["wafv2:GetIPSet", "wafv2:UpdateIPSet"],
                effect=iam.Effect.ALLOW,
                resources=[suslist.attr_arn],
            ))
        # Create Dosunban lambda
        lambda_dir_path = os.path.join(os.getcwd(), "ir_cdk_stacks", "ext_06")
        dosunban_lambda = _lambda.Function(
            self,
            "Ext06ResponseDosUnbanFunction",
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler="dosunban_lambda.lambda_handler",
            code=_lambda.Code.from_asset(lambda_dir_path),
            environment={
                "ipset_id": doslist.attr_id,
                "ipset_name": doslist.name,
                "ipset_scope": doslist.scope,
            })
        # Assign WAF permissions to lambda
        dosunban_lambda.add_to_role_policy(
            iam.PolicyStatement(
                actions=["wafv2:GetIPSet", "wafv2:UpdateIPSet"],
                effect=iam.Effect.ALLOW,
                resources=[doslist.attr_arn],
            ))
        # Create dos stepfunction
        # Define a second state machine to unban the blacklisted IP after 1 hour
        doswait_step = sfn.Wait(
            self,
            "Ext06ResponseStepDosWait",
            time=sfn.WaitTime.duration(core.Duration.hours(1)),
        )
        suswait_step = sfn.Wait(
            self,
            "Ext06ResponseStepSusWait",
            time=sfn.WaitTime.duration(core.Duration.hours(1)),
        )
        dosunban_step = sfn.Task(
            self,
            "Ext06ResponseStepDosUnban",
            task=tasks.RunLambdaTask(
                dosunban_lambda,
                integration_pattern=sfn.ServiceIntegrationPattern.
                FIRE_AND_FORGET,
                payload={"Input.$": "$"},
            ),
        )
        # NOTE(review): id says "Sos" — presumably a typo for "Sus"; kept
        # because changing it would change the CloudFormation logical id.
        susunban_step = sfn.Task(
            self,
            "Ext06ResponseStepSosUnban",
            task=tasks.RunLambdaTask(
                susunban_lambda,
                integration_pattern=sfn.ServiceIntegrationPattern.
                FIRE_AND_FORGET,
                payload={"Input.$": "$"},
            ),
        )
        dos_statemachine = sfn.StateMachine(
            self,
            "Ext06ResponseDosUnbanStateMachine",
            definition=doswait_step.next(dosunban_step),
            timeout=core.Duration.hours(1.5),
        )
        sus_statemachine = sfn.StateMachine(
            self,
            "Ext06ResponseSusUnbanStateMachine",
            definition=suswait_step.next(susunban_step),
            timeout=core.Duration.hours(1.5),
        )
        # Create lambda function (reacts to new WAF log objects in S3 and
        # starts the unban state machines)
        lambda_func = _lambda.Function(
            self,
            "Ext06ResponseFunction",
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler="response_lambda.lambda_handler",
            code=_lambda.Code.from_asset(lambda_dir_path),
            environment={
                "suslist_id": suslist.attr_id,
                "suslist_name": suslist.name,
                "suslist_scope": suslist.scope,
                "doslist_id": doslist.attr_id,
                "doslist_name": doslist.name,
                "doslist_scope": doslist.scope,
                "dos_arn": dos_statemachine.state_machine_arn,
                "sus_arn": sus_statemachine.state_machine_arn,
            },
        )
        # NOTE(review): PUBLIC_READ_WRITE makes this log bucket
        # world-readable AND world-writable — confirm this is intentional;
        # Firehose only needs the role-based access granted below.
        kinesis_log = s3.Bucket(
            self,
            id='dos_logs',
            access_control=s3.BucketAccessControl.PUBLIC_READ_WRITE,
        )
        # Assign permissions to response lambda
        # NOTE(review): kinesis_log.bucket_arn appears twice in this list —
        # harmless duplication.
        lambda_func.add_to_role_policy(
            iam.PolicyStatement(
                actions=[
                    "wafv2:GetIPSet",
                    "wafv2:UpdateIPSet",
                    "states:StartExecution",
                    "s3:GetObject",
                ],
                effect=iam.Effect.ALLOW,
                resources=[
                    doslist.attr_arn,
                    suslist.attr_arn,
                    sus_statemachine.state_machine_arn,
                    dos_statemachine.state_machine_arn,
                    kinesis_log.bucket_arn,
                    kinesis_log.bucket_arn,
                    kinesis_log.bucket_arn + "/*"
                ],
            ))
        # Create an IAM role for the stream
        stream_role = iam.Role(
            self,
            id="waf-kinesis-log-role",
            assumed_by=iam.ServicePrincipal(
                service="firehose.amazonaws.com",
            ),
        )
        stream_permissions = iam.Policy(
            self,
            id="Ext-06-kinesis-permissions",
            statements=[
                iam.PolicyStatement(
                    actions=[
                        "s3:AbortMultipartUpload",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:ListBucket",
                        "s3:ListBucketMultipartUploads",
                        "s3:PutObject",
                    ],
                    effect=iam.Effect.ALLOW,
                    resources=[
                        kinesis_log.bucket_arn, kinesis_log.bucket_arn + "/*"
                    ],
                )
            ])
        stream_role.attach_inline_policy(stream_permissions)
        # WAF logging requires the delivery stream name to start with
        # "aws-waf-logs-".
        log_stream = firehose.CfnDeliveryStream(
            self,
            id="aws-waf-logs-ext06",
            delivery_stream_type="DirectPut",
            delivery_stream_name="aws-waf-logs-ext06",
            s3_destination_configuration=firehose.CfnDeliveryStream.
            S3DestinationConfigurationProperty(
                bucket_arn=kinesis_log.bucket_arn,
                buffering_hints=firehose.CfnDeliveryStream.
                BufferingHintsProperty(interval_in_seconds=300,
                                       size_in_m_bs=5),
                compression_format="UNCOMPRESSED",
                role_arn=stream_role.role_arn),
        )
        # Every new log object triggers the response lambda.
        kinesis_log.add_event_notification(
            s3.EventType.OBJECT_CREATED,
            dest=s3_notifications.LambdaDestination(lambda_func))
        # One-shot schedule: fire the setup Lambda once, 5 minutes from
        # synth time (year included so the rule never fires again).
        utc_time = datetime.now(tz=timezone.utc)
        utc_time = utc_time + timedelta(minutes=5)
        cron_string = "cron(" + str(utc_time.minute) + " " + str(
            utc_time.hour) + " " + str(utc_time.day) + " " + str(
                utc_time.month) + " ? " + str(utc_time.year) + ")"
        trigger = events.Rule(
            self,
            id="ext-06 setup",
            rule_name="Ext06-trigger",
            schedule=events.Schedule.expression(cron_string))
        setup_dir_path = os.path.join(os.getcwd(), "ir_cdk_stacks", "ext_06")
        setup_func = _lambda.Function(
            self,
            id="Ext06Setup",
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler="setup.lambda_handler",
            code=_lambda.Code.from_asset(setup_dir_path),
            environment={
                "waf_arn": waf.attr_arn,
                "waf_id": waf.attr_id,
                "waf_scope": waf.scope,
                "waf_name": waf.name,
                "firehose_arn": log_stream.attr_arn,
                "rule_name": "Ext06-trigger",
                "doslist_arn": doslist.attr_arn,
                "rate": str(RATE),
            },
        )
        # Assign permissions to setup lambda
        setup_func.add_to_role_policy(
            iam.PolicyStatement(
                actions=[
                    "wafv2:PutLoggingConfiguration", "wafv2:GetWebACL",
                    "wafv2:UpdateWebACL"
                ],
                effect=iam.Effect.ALLOW,
                resources=[waf.attr_arn, doslist.attr_arn],
            ))
        # NOTE(review): add_target() performs the bind itself — the
        # explicit bind() call looks redundant; confirm before removing.
        setup = targets.LambdaFunction(handler=setup_func, )
        setup.bind(rule=trigger)
        trigger.add_target(target=setup)
        # Attach the Web ACL to the API Gateway stage from context.
        wafv2.CfnWebACLAssociation(
            self,
            id="API gateway association",
            resource_arn=API_ARN,
            web_acl_arn=waf.attr_arn,
        )
def __init__(self, scope: core.Construct, id: str, QueueDefine="default", TaskDefine="default", LambdaDefine="default", SNSDefine="default", **kwargs):
    """String-processing Batch pipeline state machine.

    Split -> list jobs (Lambda) -> parallel Map branches (Repeat, Reverse)
    each followed by a merge job -> output-size check (Lambda) -> SNS
    notification -> Choice that loops back to the job-listing step on
    FAILED, finishes on SUCCEEDED.

    :param QueueDefine: provider of Batch compute queues (getComputeQueue).
    :param TaskDefine: provider of Batch job definitions (getTaskDefine).
    :param LambdaDefine: provider of Lambda functions (getLambdaFunction).
    :param SNSDefine: provider of SNS topics (getSNSTopic).
    """
    super().__init__(scope, id, **kwargs)

    # Batch job: split the input object into SPLIT_NUM chunks.
    self.Job_String_Split = _sfn.Task(
        self, "String_Split",
        input_path="$.TaskInfo",
        result_path="$.JobDetail.String_Split",
        output_path="$",
        task=_sfn_tasks.RunBatchJob(
            job_name="String_Split",
            job_definition=TaskDefine.getTaskDefine("String_Split"),
            job_queue=QueueDefine.getComputeQueue("ComputeQueue"),
            container_overrides=_sfn_tasks.ContainerOverrides(
                environment={
                    "INPUT_BUCKET": _sfn.Data.string_at("$.BasicParameters.INPUT_BUCKET"),
                    "INPUT_KEY": _sfn.Data.string_at("$.BasicParameters.INPUT_KEY"),
                    "OUTPUT_BUCKET": _sfn.Data.string_at("$.BasicParameters.OUTPUT_BUCKET"),
                    "OUTPUT_KEY": _sfn.Data.string_at("$.JobParameter.String_Split.OUTPUT_KEY"),
                    "SPLIT_NUM": _sfn.Data.string_at("$.JobParameter.String_Split.SPLIT_NUM")
                }
            )
        )
    )

    # Lambda: list the chunk objects produced by the split job.
    self.Job_Map = _sfn.Task(
        self, "Job_Map",
        input_path="$.TaskInfo",
        result_path="$.TaskInfo.JobDetail.Job_Map",
        output_path="$",
        task=_sfn_tasks.RunLambdaTask(LambdaDefine.getLambdaFunction("Get_Job_List")),
    )

    # Per-chunk Batch job: reverse the chunk (used inside the Reverse Map).
    self.Job_String_Reverse = _sfn.Task(
        self, "String_Reverse",
        input_path="$",
        result_path="$",
        output_path="$",
        task=_sfn_tasks.RunBatchJob(
            job_name="String_Reverse",
            job_definition=TaskDefine.getTaskDefine("String_Reverse"),
            job_queue=QueueDefine.getComputeQueue("ComputeQueue"),
            container_overrides=_sfn_tasks.ContainerOverrides(
                environment={
                    "INDEX": _sfn.Data.string_at("$.INDEX"),
                    "INPUT_BUCKET": _sfn.Data.string_at("$.INPUT_BUCKET"),
                    "INPUT_KEY": _sfn.Data.string_at("$.INPUT_KEY"),
                    "OUTPUT_BUCKET": _sfn.Data.string_at("$.OUTPUT_BUCKET"),
                    "OUTPUT_KEY": _sfn.Data.string_at("$.String_Reverse.OUTPUT_KEY")
                }
            )
        )
    )

    # Per-chunk Batch job: repeat the chunk (used inside the Repeat Map).
    self.Job_String_Repeat = _sfn.Task(
        self, "String_Repeat",
        input_path="$",
        result_path="$",
        output_path="$",
        task=_sfn_tasks.RunBatchJob(
            job_name="String_Repeat",
            job_definition=TaskDefine.getTaskDefine("String_Repeat"),
            job_queue=QueueDefine.getComputeQueue("ComputeQueue"),
            container_overrides=_sfn_tasks.ContainerOverrides(
                environment={
                    "INDEX": _sfn.Data.string_at("$.INDEX"),
                    "INPUT_BUCKET": _sfn.Data.string_at("$.INPUT_BUCKET"),
                    "INPUT_KEY": _sfn.Data.string_at("$.INPUT_KEY"),
                    "OUTPUT_BUCKET": _sfn.Data.string_at("$.OUTPUT_BUCKET"),
                    "OUTPUT_KEY": _sfn.Data.string_at("$.String_Repeat.OUTPUT_KEY")
                }
            )
        )
    )

    # Map over the chunk list ($.Payload) with up to 50 concurrent jobs;
    # the Map result is discarded so state flows through unchanged.
    self.Job_String_Process_Repeat = _sfn.Map(
        self, "String_Process_Repeat",
        max_concurrency=50,
        input_path="$.TaskInfo.JobDetail.Job_Map",
        result_path="DISCARD",
        items_path="$.Payload",
        output_path="$",
    ).iterator(self.Job_String_Repeat)

    # Merge the repeated chunks back into one object.
    # NOTE(review): "PERFIX" looks like a typo for "PREFIX", but it is the
    # env-var name the merge container reads — renaming it here would break
    # the container; fix both sides together if at all.
    self.Job_String_Repeat_Merge = _sfn.Task(
        self, "String_Repeat_Merge",
        input_path="$.TaskInfo",
        result_path="DISCARD",
        output_path="$",
        task=_sfn_tasks.RunBatchJob(
            job_name="String_Repeat_Merge",
            job_definition=TaskDefine.getTaskDefine("String_Merge"),
            job_queue=QueueDefine.getComputeQueue("ComputeQueue"),
            container_overrides=_sfn_tasks.ContainerOverrides(
                environment={
                    "PERFIX": _sfn.Data.string_at("$.JobParameter.String_Repeat.Prefix"),
                    "FILE_NAME": _sfn.Data.string_at("$.BasicParameters.INPUT_KEY"),
                    "INPUT_BUCKET": _sfn.Data.string_at("$.BasicParameters.INPUT_BUCKET"),
                    "INPUT_KEY": _sfn.Data.string_at("$.JobParameter.String_Repeat.OUTPUT_KEY"),
                    "OUTPUT_BUCKET": _sfn.Data.string_at("$.BasicParameters.OUTPUT_BUCKET"),
                    "OUTPUT_KEY": _sfn.Data.string_at("$.JobParameter.String_Repeat.OUTPUT_KEY")
                }
            )
        )
    )
    self.Job_String_Process_Repeat.next(self.Job_String_Repeat_Merge)

    # Same fan-out for the Reverse branch.
    self.Job_String_Process_Reverse = _sfn.Map(
        self, "String_Process_Reverse",
        max_concurrency=50,
        input_path="$.TaskInfo.JobDetail.Job_Map",
        result_path="DISCARD",
        items_path="$.Payload",
        output_path="$",
    ).iterator(self.Job_String_Reverse)

    # Merge the reversed chunks back into one object.
    self.Job_String_Reverse_Merge = _sfn.Task(
        self, "String_Reverse_Merge",
        input_path="$.TaskInfo",
        result_path="DISCARD",
        output_path="$",
        task=_sfn_tasks.RunBatchJob(
            job_name="String_Reverse_Merge",
            job_definition=TaskDefine.getTaskDefine("String_Merge"),
            job_queue=QueueDefine.getComputeQueue("ComputeQueue"),
            container_overrides=_sfn_tasks.ContainerOverrides(
                environment={
                    "PERFIX": _sfn.Data.string_at("$.JobParameter.String_Reverse.Prefix"),
                    "FILE_NAME": _sfn.Data.string_at("$.BasicParameters.INPUT_KEY"),
                    "INPUT_BUCKET": _sfn.Data.string_at("$.BasicParameters.INPUT_BUCKET"),
                    "INPUT_KEY": _sfn.Data.string_at("$.JobParameter.String_Reverse.OUTPUT_KEY"),
                    "OUTPUT_BUCKET": _sfn.Data.string_at("$.BasicParameters.OUTPUT_BUCKET"),
                    "OUTPUT_KEY": _sfn.Data.string_at("$.JobParameter.String_Reverse.OUTPUT_KEY")
                }
            )
        )
    )
    self.Job_String_Process_Reverse.next(self.Job_String_Reverse_Merge)

    # Run the Repeat and Reverse branches concurrently; discard the
    # combined Parallel result.
    self.Job_Parallel_Process = _sfn.Parallel(
        self, 'Parallel_Process',
        input_path="$",
        result_path="DISCARD"
    )
    self.Job_Parallel_Process.branch(self.Job_String_Process_Repeat)
    self.Job_Parallel_Process.branch(self.Job_String_Process_Reverse)

    # Lambda: inspect merged outputs and report a status payload.
    self.Job_Check_Output = _sfn.Task(
        self, "Check_Output",
        input_path="$.TaskInfo",
        result_path="$.JobDetail.Check_Output",
        output_path="$.JobDetail.Check_Output.Payload",
        task=_sfn_tasks.RunLambdaTask(LambdaDefine.getLambdaFunction("Get_Output_size")),
    )

    # Branch on the reported output status (see state machine definition).
    self.Job_Is_Complete = _sfn.Choice(
        self, "Is_Complete",
        input_path="$.TaskInfo",
        output_path="$"
    )
    self.Job_Finish = _sfn.Wait(
        self, "Finish",
        time=_sfn.WaitTime.duration(core.Duration.seconds(5))
    )

    # Publish the job comment/subject to SNS (fire-and-forget).
    self.Job_Notification = _sfn.Task(
        self, "Notification",
        input_path="$.TaskInfo",
        result_path="DISCARD",
        output_path="$",
        task=_sfn_tasks.PublishToTopic(
            SNSDefine.getSNSTopic("Topic_Batch_Job_Notification"),
            integration_pattern=_sfn.ServiceIntegrationPattern.FIRE_AND_FORGET,
            message=_sfn.TaskInput.from_data_at("$.JobStatus.Job_Comment"),
            subject=_sfn.Data.string_at("$.JobStatus.SNS_Subject")
        )
    )
    self.Job_Failed = _sfn.Wait(
        self, "Failed",
        time=_sfn.WaitTime.duration(core.Duration.seconds(5))
    )

    # Assemble the machine. On FAILED the Failed wait loops back to the
    # job-listing step for a retry; SUCCEEDED ends at Finish.
    self.statemachine = _sfn.StateMachine(
        self,
        "StateMachine",
        definition=self.Job_String_Split.next(self.Job_Map)
            .next(self.Job_Parallel_Process)
            .next(self.Job_Check_Output)
            .next(self.Job_Notification)
            .next(self.Job_Is_Complete
                .when(_sfn.Condition.string_equals(
                    "$.JobStatus.OutputStatus", "FAILED"
                ), self.Job_Failed
                    .next(self.Job_Map)
                )
                .when(_sfn.Condition.string_equals(
                    "$.JobStatus.OutputStatus", "SUCCEEDED"
                ), self.Job_Finish)
                .otherwise(self.Job_Failed)
            ),
        timeout=core.Duration.hours(1),
    )
def __init__(self,
             scope: core.Construct,
             id: str,
             id_checker: str,
             event_bus: str,
             stage: Optional[str] = 'prod',
             **kwargs) -> None:
    """Bank account-application stack.

    Two DynamoDB tables (applications + event-sourced events with a
    stream), an application-service Lambda behind API Gateway, an event
    publisher fed by the events table stream, and a Step Functions
    workflow that runs automated checks and optionally waits for a human
    review via the task-token callback pattern.

    :param id_checker: ARN of an existing identity-checker Lambda.
    :param event_bus: EventBridge bus ARN handed to the event publisher.
    :param stage: deployment stage suffix used in resource names.
    """
    super().__init__(scope, id + '-' + stage, **kwargs)

    app_table_name = id + '-applications-table-' + stage
    app_table = ddb.Table(self,
                          id=app_table_name,
                          table_name=app_table_name,
                          partition_key=ddb.Attribute(
                              name='id', type=ddb.AttributeType.STRING),
                          billing_mode=ddb.BillingMode.PAY_PER_REQUEST)
    # Events table streams NEW_IMAGE records to the publisher Lambda below.
    events_table_name = id + '-events-table-' + stage
    events_table = ddb.Table(self,
                             id=events_table_name,
                             table_name=events_table_name,
                             partition_key=ddb.Attribute(
                                 name='id', type=ddb.AttributeType.STRING),
                             billing_mode=ddb.BillingMode.PAY_PER_REQUEST,
                             stream=ddb.StreamViewType.NEW_IMAGE)
    self._table_stream_arn = events_table.table_stream_arn

    # create our Lambda function for the bank account service
    func_name = id + '-' + stage + '-' + 'account-application'
    lambda_assets = lambda_.Code.from_asset('account_application_service')
    handler = lambda_.Function(self,
                               func_name,
                               code=lambda_assets,
                               runtime=lambda_.Runtime.NODEJS_10_X,
                               handler='main.handler',
                               environment={
                                   'ACCOUNTS_TABLE_NAME': app_table.table_name,
                                   'EVENTS_TABLE_NAME': events_table.table_name,
                                   'REGION': core.Aws.REGION
                               })
    gw.LambdaRestApi(self, id=stage + '-' + id, handler=handler)

    # grant main Lambda function access to DynamoDB tables
    app_table.grant_read_write_data(handler.role)
    events_table.grant_read_write_data(handler.role)

    # NOTE(review): broad wildcard policy (events:*, states:* on '*') —
    # shared by both Lambdas; consider narrowing.
    p_statement = iam.PolicyStatement(actions=[
        'ssm:Describe*', 'ssm:Get*', 'ssm:List*', 'events:*', 'states:*'
    ],
                                      effect=iam.Effect.ALLOW,
                                      resources=['*'])
    handler.add_to_role_policy(statement=p_statement)

    # create the Lambda function for the event publisher
    evt_publisher = id + '-' + stage + '-' + 'event-publisher'
    evt_handler = lambda_.Function(
        self,
        evt_publisher,
        code=lambda_assets,
        runtime=lambda_.Runtime.NODEJS_10_X,
        handler='event-publisher.handler',
        events=[
            lambda_es.DynamoEventSource(
                table=events_table,
                starting_position=lambda_.StartingPosition.LATEST)
        ],
        environment={
            'EVENT_BRIDGE_ARN': event_bus,
            'REGION': core.Aws.REGION
        })
    evt_handler.add_to_role_policy(statement=p_statement)

    # set up StepFunctions
    # Terminal actions: approve / reject commands sent to the service Lambda.
    approve_application = sf.Task(
        self,
        'Approve Application',
        task=sft.InvokeFunction(handler,
                                payload={
                                    'body': {
                                        'command':
                                        'APPROVE_ACCOUNT_APPLICATION',
                                        'data': {
                                            'id.$': '$.application.id'
                                        }
                                    }
                                }),
        result_path='$.approveApplication')
    reject_application = sf.Task(self,
                                 'Reject Application',
                                 task=sft.InvokeFunction(
                                     handler,
                                     payload={
                                         'body': {
                                             'command':
                                             'REJECT_ACCOUNT_APPLICATION',
                                             'data': {
                                                 'id.$': '$.application.id'
                                             }
                                         }
                                     }),
                                 result_path='$.rejectApplication')

    # Reference the externally supplied identity-checker Lambda by ARN.
    id_checker_handler = lambda_.Function.from_function_arn(
        self, 'IdentityChecker', function_arn=id_checker)
    check_identity = sf.Task(self,
                             'Check Identity',
                             task=sft.InvokeFunction(
                                 id_checker_handler,
                                 payload={
                                     'body': {
                                         'command': 'CHECK_IDENTITY',
                                         'data': {
                                             'application.$': '$.application'
                                         }
                                     }
                                 }))

    # Callback pattern: the execution pauses on a task token until a human
    # decision is posted back; the following Choice routes on it.
    # (`.next()` returns a Chain, so this name holds the whole fragment.)
    wait_for_human_review = sf.Task(self, 'Wait for Human Review',
                                    task=sft.RunLambdaTask(handler,
                                                           integration_pattern=sf.ServiceIntegrationPattern.WAIT_FOR_TASK_TOKEN,
                                                           payload={
                                                               'body': {
                                                                   'command': 'FLAG_ACCOUNT_APPLICATION_FOR_HUMAN_REVIEW',
                                                                   'data': {
                                                                       'id.$': '$.application.id',
                                                                       'taskToken': sf.Context.task_token
                                                                   }
                                                               }
                                                           }),
                                    result_path='$.humanReview') \
        .next(
            sf.Choice(self, 'Human Approval Choice')
            .when(sf.Condition.string_equals('$.humanReview.decision', 'APPROVE'), next=approve_application)
            .when(sf.Condition.string_equals('$.humanReview.decision', 'REJECT'), next=reject_application))

    # Identity check and (stubbed) fraud model run in parallel; either
    # branch flagging the application routes to human review.
    sm_definition = sf.Parallel(self, 'Perform Automated Checks',
                                result_path='$.checks') \
        .branch(check_identity) \
        .branch(sf.Pass(self, 'Check Fraud Model',
                        result=sf.Result({'flagged': False}))) \
        .next(
            sf.Choice(self, 'Automated Checks Choice')
            .when(sf.Condition.boolean_equals('$.checks[0].flagged', True), next=wait_for_human_review)
            .when(sf.Condition.boolean_equals('$.checks[1].flagged', True), next=wait_for_human_review)
            .otherwise(approve_application))

    state_machine = sf.StateMachine(self,
                                    'OpenAccountStateMachine' + stage,
                                    definition=sm_definition)
    # Expose the state machine ARN via SSM for other stacks/services.
    ssm.CfnParameter(self,
                     id='StateMachineArnSSM',
                     type='String',
                     value=state_machine.state_machine_arn,
                     name='StateMachineArnSSM')
def __init__(self, app: core.App, id: str, props, **kwargs) -> None:
    """IAP TES showcase pipeline stack.

    Builds Lambdas that launch TES tasks (samplesheet mapping, BCL
    conversion, fastq mapping, Dragen, MultiQC), a callback Lambda that
    completes the async task-token steps, and a Step Functions pipeline
    that fans Dragen out over samples before running MultiQC and copying
    the report to S3.

    :param props: dict of deployment settings (IAP URLs, task ids, SSM
        parameter names, bucket/volume names).
    """
    super().__init__(app, id, **kwargs)

    # NOTE(review): bucket name is empty — looks scrubbed for publication;
    # must be set to a real bucket before deployment.
    run_data_bucket_name = ''
    run_data_bucket = s3.Bucket.from_bucket_name(
        self, run_data_bucket_name, bucket_name=run_data_bucket_name)

    # IAM roles for the lambda functions
    lambda_role = iam.Role(
        self,
        'EchoTesLambdaRole',
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AWSLambdaBasicExecutionRole')
        ])
    copy_lambda_role = iam.Role(
        self,
        'CopyToS3LambdaRole',
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AWSLambdaBasicExecutionRole')
        ])
    run_data_bucket.grant_write(copy_lambda_role)

    # Callback role additionally needs Step Functions access to send
    # task success/failure for the token-based tasks.
    callback_role = iam.Role(
        self,
        'CallbackTesLambdaRole',
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AWSLambdaBasicExecutionRole'),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'AWSStepFunctionsFullAccess')
        ])

    # Lambda function to call back and complete SFN async tasks
    lmbda.Function(self,
                   'CallbackLambda',
                   function_name='callback_iap_tes_lambda_dev',
                   handler='callback.lambda_handler',
                   runtime=lmbda.Runtime.PYTHON_3_7,
                   code=lmbda.Code.from_asset('lambdas'),
                   role=callback_role,
                   timeout=core.Duration.seconds(20))

    # TES-task launcher Lambdas: same handler, per-task TASK_ID/VERSION
    # and container image supplied via environment.
    samplesheet_mapper_function = lmbda.Function(
        self,
        'SampleSheetMapperTesLambda',
        function_name='showcase_ss_mapper_iap_tes_lambda_dev',
        handler='launch_tes_task.lambda_handler',
        runtime=lmbda.Runtime.PYTHON_3_7,
        code=lmbda.Code.from_asset('lambdas'),
        role=lambda_role,
        timeout=core.Duration.seconds(20),
        environment={
            'IAP_API_BASE_URL': props['iap_api_base_url'],
            'TASK_ID': props['task_id'],
            'TASK_VERSION': 'tvn.0ee81865bf514b7bb7b7ea305c88191f',
            # 'TASK_VERSION': 'tvn.b4735419fbe4455eb2b91960e48921f9',  # echo task
            'SSM_PARAM_JWT': props['ssm_param_name'],
            'GDS_LOG_FOLDER': props['gds_log_folder'],
            'IMAGE_NAME': 'umccr/alpine_pandas',
            'IMAGE_TAG': '1.0.1',
            'TES_TASK_NAME': 'SampleSheetMapper'
        })
    bcl_convert_function = lmbda.Function(
        self,
        'BclConvertTesLambda',
        function_name='showcase_bcl_convert_iap_tes_lambda_dev',
        handler='launch_tes_task.lambda_handler',
        runtime=lmbda.Runtime.PYTHON_3_7,
        code=lmbda.Code.from_asset('lambdas'),
        role=lambda_role,
        timeout=core.Duration.seconds(20),
        environment={
            'IAP_API_BASE_URL': props['iap_api_base_url'],
            'TASK_ID': props['task_id'],
            'TASK_VERSION': 'tvn.ab3e85f9aaf24890ad169fdab3825c0d',
            # 'TASK_VERSION': 'tvn.b4735419fbe4455eb2b91960e48921f9',  # echo task
            'SSM_PARAM_JWT': props['ssm_param_name'],
            'GDS_LOG_FOLDER': props['gds_log_folder'],
            'IMAGE_NAME': '699120554104.dkr.ecr.us-east-1.amazonaws.com/public/dragen',
            'IMAGE_TAG': '3.5.2',
            'TES_TASK_NAME': 'BclConvert'
        })
    fastq_mapper_function = lmbda.Function(
        self,
        'FastqMapperTesLambda',
        function_name='showcase_fastq_mapper_iap_tes_lambda_dev',
        handler='launch_tes_task.lambda_handler',
        runtime=lmbda.Runtime.PYTHON_3_7,
        code=lmbda.Code.from_asset('lambdas'),
        role=lambda_role,
        timeout=core.Duration.seconds(20),
        environment={
            'IAP_API_BASE_URL': props['iap_api_base_url'],
            'TASK_ID': props['task_id'],
            'TASK_VERSION': 'tvn.f90aa88da2fe490fb6e6366b65abe267',
            # 'TASK_VERSION': 'tvn.b4735419fbe4455eb2b91960e48921f9',  # echo task
            'SSM_PARAM_JWT': props['ssm_param_name'],
            'GDS_LOG_FOLDER': props['gds_log_folder'],
            'IMAGE_NAME': 'umccr/alpine_pandas',
            'IMAGE_TAG': '1.0.1',
            'TES_TASK_NAME': 'FastqMapper'
        })
    gather_samples_function = lmbda.Function(
        self,
        'GatherSamplesTesLambda',
        function_name='showcase_gather_samples_iap_tes_lambda_dev',
        handler='gather_samples.lambda_handler',
        runtime=lmbda.Runtime.PYTHON_3_7,
        code=lmbda.Code.from_asset('lambdas'),
        role=lambda_role,
        timeout=core.Duration.seconds(20),
        environment={
            'IAP_API_BASE_URL': props['iap_api_base_url'],
            'SSM_PARAM_JWT': props['ssm_param_name']
        })
    dragen_function = lmbda.Function(
        self,
        'DragenTesLambda',
        function_name='showcase_dragen_iap_tes_lambda_dev',
        handler='launch_tes_task.lambda_handler',
        runtime=lmbda.Runtime.PYTHON_3_7,
        code=lmbda.Code.from_asset('lambdas'),
        role=lambda_role,
        timeout=core.Duration.seconds(20),
        environment={
            'IAP_API_BASE_URL': props['iap_api_base_url'],
            'TASK_ID': props['task_id'],
            'TASK_VERSION': 'tvn.096b39e90e4443abae0333e23fcabc61',
            # 'TASK_VERSION': 'tvn.b4735419fbe4455eb2b91960e48921f9',  # echo task
            'SSM_PARAM_JWT': props['ssm_param_name'],
            'GDS_LOG_FOLDER': props['gds_log_folder'],
            'IMAGE_NAME': '699120554104.dkr.ecr.us-east-1.amazonaws.com/public/dragen',
            'IMAGE_TAG': '3.5.2',
            'TES_TASK_NAME': 'Dragen'
        })
    multiqc_function = lmbda.Function(
        self,
        'MultiQcTesLambda',
        function_name='showcase_multiqc_iap_tes_lambda_dev',
        handler='launch_tes_task.lambda_handler',
        runtime=lmbda.Runtime.PYTHON_3_7,
        code=lmbda.Code.from_asset('lambdas'),
        role=lambda_role,
        timeout=core.Duration.seconds(20),
        environment={
            'IAP_API_BASE_URL': props['iap_api_base_url'],
            'TASK_ID': props['task_id'],
            'TASK_VERSION': 'tvn.983a0239483d4253a8a0531fa1de0376',
            # 'TASK_VERSION': 'tvn.b4735419fbe4455eb2b91960e48921f9',  # echo task
            'SSM_PARAM_JWT': props['ssm_param_name'],
            'GDS_LOG_FOLDER': props['gds_log_folder'],
            'IMAGE_NAME': 'umccr/multiqc_dragen',
            'IMAGE_TAG': '1.1',
            'TES_TASK_NAME': 'MultiQC'
        })
    copy_report_to_s3 = lmbda.Function(
        self,
        'CopyReportToS3Lambda',
        function_name='showcase_copy_report_lambda_dev',
        handler='copy_to_s3.lambda_handler',
        runtime=lmbda.Runtime.PYTHON_3_7,
        code=lmbda.Code.from_asset('lambdas'),
        role=copy_lambda_role,
        timeout=core.Duration.seconds(20),
        environment={
            'IAP_API_BASE_URL': props['iap_api_base_url'],
            'SSM_PARAM_JWT': props['ssm_param_name'],
            'GDS_RUN_VOLUME': props['gds_run_volume'],
            'S3_RUN_BUCKET': props['s3_run_bucket']
        })

    # IAP JWT access token stored in SSM Parameter Store
    secret_value = ssm.StringParameter.from_secure_string_parameter_attributes(
        self,
        "JwtToken",
        parameter_name=props['ssm_param_name'],
        version=props['ssm_param_version'])
    secret_value.grant_read(samplesheet_mapper_function)
    secret_value.grant_read(bcl_convert_function)
    secret_value.grant_read(fastq_mapper_function)
    secret_value.grant_read(gather_samples_function)
    secret_value.grant_read(dragen_function)
    secret_value.grant_read(multiqc_function)
    secret_value.grant_read(copy_report_to_s3)

    # SFN task definitions
    # WAIT_FOR_TASK_TOKEN tasks pause until the callback Lambda reports
    # the TES task's completion with the passed token.
    task_samplesheet_mapper = sfn.Task(
        self,
        "SampleSheetMapper",
        task=sfn_tasks.RunLambdaTask(
            samplesheet_mapper_function,
            integration_pattern=sfn.ServiceIntegrationPattern.
            WAIT_FOR_TASK_TOKEN,
            payload={
                "taskCallbackToken": sfn.Context.task_token,
                "runId.$": "$.runfolder"
            }),
        result_path="$.guid")
    task_bcl_convert = sfn.Task(
        self,
        "BclConvert",
        task=sfn_tasks.RunLambdaTask(
            bcl_convert_function,
            integration_pattern=sfn.ServiceIntegrationPattern.
            WAIT_FOR_TASK_TOKEN,
            payload={
                "taskCallbackToken": sfn.Context.task_token,
                "runId.$": "$.runfolder"
            }),
        result_path="$.guid")
    task_fastq_mapper = sfn.Task(
        self,
        "FastqMapper",
        task=sfn_tasks.RunLambdaTask(
            fastq_mapper_function,
            integration_pattern=sfn.ServiceIntegrationPattern.
            WAIT_FOR_TASK_TOKEN,
            payload={
                "taskCallbackToken": sfn.Context.task_token,
                "runId.$": "$.runfolder"
            }),
        result_path="$.guid")
    # Synchronous invoke: returns the sample id list used by the Map state.
    task_gather_samples = sfn.Task(self,
                                   "GatherSamples",
                                   task=sfn_tasks.InvokeFunction(
                                       gather_samples_function,
                                       payload={"runId.$": "$.runfolder"}),
                                   result_path="$.sample_ids")
    task_dragen = sfn.Task(
        self,
        "DragenTask",
        task=sfn_tasks.RunLambdaTask(
            dragen_function,
            integration_pattern=sfn.ServiceIntegrationPattern.
            WAIT_FOR_TASK_TOKEN,
            payload={
                "taskCallbackToken": sfn.Context.task_token,
                "runId.$": "$.runId",
                "index.$": "$.index",
                "item.$": "$.item"
            }),
        result_path="$.exit_status")
    task_multiqc = sfn.Task(
        self,
        "MultiQcTask",
        task=sfn_tasks.RunLambdaTask(
            multiqc_function,
            integration_pattern=sfn.ServiceIntegrationPattern.
            WAIT_FOR_TASK_TOKEN,
            payload={
                "taskCallbackToken": sfn.Context.task_token,
                "runId.$": "$.runfolder",
                "samples.$": "$.sample_ids"
            }))
    task_copy_report_to_s3 = sfn.Task(
        self,
        "CopyReportToS3",
        task=sfn_tasks.InvokeFunction(copy_report_to_s3,
                                      payload={"runId.$": "$.runfolder"}),
        result_path="$.copy_report")

    # Fan Dragen out over the gathered sample ids (max 20 concurrent).
    scatter = sfn.Map(self,
                      "Scatter",
                      items_path="$.sample_ids",
                      parameters={
                          "index.$": "$$.Map.Item.Index",
                          "item.$": "$$.Map.Item.Value",
                          "runId.$": "$.runfolder"
                      },
                      result_path="$.mapresults",
                      max_concurrency=20).iterator(task_dragen)

    # Linear pipeline with the Dragen scatter in the middle.
    definition = task_samplesheet_mapper \
        .next(task_bcl_convert) \
        .next(task_fastq_mapper) \
        .next(task_gather_samples) \
        .next(scatter) \
        .next(task_multiqc) \
        .next(task_copy_report_to_s3)

    sfn.StateMachine(
        self,
        "ShowcaseSfnStateMachine",
        definition=definition,
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Provision the IN-AUR-01 detect-and-respond stack.

    Detection: a metric filter on the Aurora PostgreSQL log group counts
    failed password authentications; an alarm fires at >= 10 failures in a
    30-second period and publishes to an SNS topic.

    Response: a lambda subscribed to that topic (``response_lambda``)
    handles the alarm, and a Step Functions state machine waits one hour
    before invoking a second lambda (``unban_lambda``) that deletes the
    network-ACL entry again.

    Requires the CDK context variables ``cluster_name``, ``notify_email``
    and ``webhook_url``; when any is missing, an error is logged and no
    resources are created.
    """
    super().__init__(scope, id, **kwargs)

    # Deployment configuration comes from CDK context (cdk.json / --context).
    CLUSTER_NAME = self.node.try_get_context("cluster_name")
    NOTIFY_EMAIL = self.node.try_get_context("notify_email")
    SLACK_WEBHOOK_URL = self.node.try_get_context("webhook_url")
    if not CLUSTER_NAME or not NOTIFY_EMAIL or not SLACK_WEBHOOK_URL:
        logger.error(
            f"Required context variables for {id} were not provided!")
    else:
        # Get the log group of our postgres instance
        log_group = logs.LogGroup.from_log_group_name(
            self,
            "InAur01DetectionLogGroup",
            f"/aws/rds/cluster/{CLUSTER_NAME}/postgresql",
        )

        # Create new metric
        metric = cloudwatch.Metric(
            namespace="LogMetrics",
            metric_name="InAur01DetectionFailedDbLoginAttempts",
        )

        # Apply metric filter
        # Filter all metrics of failed login attempts in log
        logs.MetricFilter(
            self,
            "InAur01DetectionMetricFilter",
            log_group=log_group,
            metric_namespace=metric.namespace,
            metric_name=metric.metric_name,
            filter_pattern=logs.FilterPattern.all_terms(
                "FATAL: password authentication failed for user"),
            metric_value="1",
        )

        # Create new SNS topic
        topic = sns.Topic(self, "InAur01DetectionTopic")

        # Add email subscription
        topic.add_subscription(subs.EmailSubscription(NOTIFY_EMAIL))

        # Create new alarm for metric
        # Alarm will trigger if there is >= 10 failed login attempts
        # over a period of 30 seconds.
        # NOTE(review): period=/statistic= directly on Alarm are deprecated
        # in later CDK releases in favor of metric.with_() — confirm the
        # pinned CDK version before changing.
        alarm = cloudwatch.Alarm(
            self,
            "InAur01DetectionAlarm",
            metric=metric,
            threshold=10,
            evaluation_periods=1,
            period=core.Duration.seconds(30),
            datapoints_to_alarm=1,
            statistic="sum",
        )

        # Add SNS action to alarm
        alarm.add_alarm_action(cw_actions.SnsAction(topic))

        # Create unban lambda; both lambda handlers ship from the same
        # asset directory.
        lambda_dir_path = os.path.join(os.getcwd(), "ir_cdk_stacks",
                                       "in_aur_01")
        unban_lambda = _lambda.Function(
            self,
            "InAur01ResponseUnbanFunction",
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler="unban_lambda.lambda_handler",
            code=_lambda.Code.from_asset(lambda_dir_path),
        )

        # Assign EC2 permissions to lambda — the unban handler only needs
        # to delete the network-ACL entry that implemented the ban.
        unban_lambda.add_to_role_policy(
            iam.PolicyStatement(
                actions=["ec2:DeleteNetworkAclEntry"],
                effect=iam.Effect.ALLOW,
                resources=["*"],
            ))

        # Create stepfunction
        # Define a second state machine to unban the blacklisted IP after 1 hour
        wait_step = sfn.Wait(
            self,
            "InAur01ResponseStepWait",
            time=sfn.WaitTime.duration(core.Duration.hours(1)),
        )
        # Fire-and-forget: the unban lambda's result is not awaited; the
        # whole execution state is forwarded as the lambda payload.
        unban_step = sfn.Task(
            self,
            "InAur01ResponseStepUnban",
            task=tasks.RunLambdaTask(
                unban_lambda,
                integration_pattern=sfn.ServiceIntegrationPattern.
                FIRE_AND_FORGET,
            ),
            parameters={"Payload.$": "$"},
        )
        # 1.5 h execution timeout leaves slack beyond the 1 h wait state.
        statemachine = sfn.StateMachine(
            self,
            "InAur01ResponseUnbanStateMachine",
            definition=wait_step.next(unban_step),
            timeout=core.Duration.hours(1.5),
        )

        # Create lambda function that reacts to the alarm notifications; it
        # receives the state machine ARN so it can schedule the delayed
        # unban, and the Slack webhook for notifications.
        lambda_func = _lambda.Function(
            self,
            "InAur01ResponseFunction",
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler="response_lambda.lambda_handler",
            code=_lambda.Code.from_asset(lambda_dir_path),
            environment={
                "webhook_url": SLACK_WEBHOOK_URL,
                "unban_sm_arn": statemachine.state_machine_arn,
                "cluster_name": CLUSTER_NAME,
            },
        )

        # AWS CDK has a bug where it would not add the correct permission
        # to the lambda for Cloudwatch log subscription to invoke it.
        # Hence, we need to manually add permission to lambda.
        lambda_func.add_permission(
            "InAur01ResponseFunctionInvokePermission",
            principal=iam.ServicePrincipal("logs.amazonaws.com"),
            action="lambda:InvokeFunction",
            source_arn=log_group.log_group_arn + ":*",
        )

        # Assign permissions to response lambda
        lambda_func.add_to_role_policy(
            iam.PolicyStatement(
                actions=[
                    "states:StartExecution",
                ],
                effect=iam.Effect.ALLOW,
                resources=[statemachine.state_machine_arn],
            ))

        # Assign RDS Read-only permissions to lambda
        lambda_func.add_to_role_policy(
            iam.PolicyStatement(
                actions=["rds:Describe*"],
                effect=iam.Effect.ALLOW,
                resources=["*"],
            ))

        # Assign EC2 permissions to lambda (create/remove the banning
        # network-ACL entries).
        lambda_func.add_to_role_policy(
            iam.PolicyStatement(
                actions=[
                    "ec2:Describe*",
                    "ec2:CreateNetworkAclEntry",
                    "ec2:DeleteNetworkAclEntry",
                ],
                effect=iam.Effect.ALLOW,
                resources=["*"],
            ))

        # Assign CloudWatch logs permissions to lambda
        lambda_func.add_to_role_policy(
            iam.PolicyStatement(
                actions=[
                    "cloudwatch:Get*",
                    "cloudwatch:Describe*",
                    "logs:FilterLogEvents",
                    "logs:DescribeMetricFilters",
                ],
                effect=iam.Effect.ALLOW,
                resources=["*"],
            ))

        # Invoke the response lambda from the detection topic.
        sns_event_source = lambda_event_sources.SnsEventSource(topic)
        lambda_func.add_event_source(sns_event_source)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Provision the EXT-01 WAF detect-and-respond stack.

    Builds three regional WAFv2 rule groups (XSS, SQL injection, and
    LFI/path-traversal via a regex pattern set), combines them into a web
    ACL associated with the API Gateway given by the ``api_arn`` context
    variable, and streams WAF logs through Kinesis Firehose into an S3
    bucket. New log objects trigger a response lambda that bans offending
    IPs into a WAF IPSet and starts a state machine that unbans them after
    one hour. A one-shot CloudWatch Events cron rule (now + 5 minutes) runs
    a setup lambda that configures WAF logging after deployment.

    Requires the CDK context variable ``api_arn``; when missing, an error
    is logged and no resources are created.

    Fixes vs. previous revision (templates otherwise unchanged):
    - WAF log bucket is PRIVATE instead of PUBLIC_READ_WRITE (security).
    - Regex literals are raw strings (same runtime values; the old
      ``"\\."`` form was an invalid escape sequence).
    - Duplicate bucket ARN removed from the response lambda's policy.
    - Repeated statement/rule structures built via local factories.
    """
    super().__init__(scope, id, **kwargs)

    API_ARN = self.node.try_get_context("api_arn")
    if not API_ARN:
        logger.error(
            f"Required context variables for {id} were not provided!")
    else:
        # --- local factories for the heavily repeated WAF structures ---
        # BODY / QUERY_STRING / URI_PATH / SINGLE_HEADER and
        # NO_TEXT_TRANSFORMATION are module-level constants.
        def xss_statement(field):
            # XSS match statement inspecting *field*.
            return wafv2.CfnRuleGroup.StatementOneProperty(
                xss_match_statement=wafv2.CfnRuleGroup.
                XssMatchStatementProperty(
                    field_to_match=field,
                    text_transformations=[NO_TEXT_TRANSFORMATION]))

        def sqli_statement(field):
            # SQL-injection match statement inspecting *field*.
            return wafv2.CfnRuleGroup.StatementOneProperty(
                sqli_match_statement=wafv2.CfnRuleGroup.
                SqliMatchStatementProperty(
                    field_to_match=field,
                    text_transformations=[NO_TEXT_TRANSFORMATION]))

        def block_rule(name, priority, statement, metric_name):
            # Rule-group rule that BLOCKs matching requests.
            return wafv2.CfnRuleGroup.RuleProperty(
                name=name,
                priority=priority,
                statement=statement,
                action=wafv2.CfnRuleGroup.RuleActionProperty(block={}),
                visibility_config=wafv2.CfnRuleGroup.
                VisibilityConfigProperty(
                    cloud_watch_metrics_enabled=False,
                    metric_name=metric_name,
                    sampled_requests_enabled=False))

        def group_visibility(metric_name):
            # Rule-group-level visibility config (CloudWatch metrics on).
            return wafv2.CfnRuleGroup.VisibilityConfigProperty(
                cloud_watch_metrics_enabled=True,
                metric_name=metric_name,
                sampled_requests_enabled=False)

        # Create XSS rule group: block XSS payloads in query string, body,
        # URI path and headers.
        xss_rule_group = wafv2.CfnRuleGroup(
            self,
            id="XSS",
            capacity=160,
            scope="REGIONAL",
            visibility_config=group_visibility("xss_attacks"),
            rules=[
                block_rule("xss_query_string", 1,
                           xss_statement(QUERY_STRING), "xss_attacks"),
                block_rule("xss_body", 2, xss_statement(BODY),
                           "xss_attacks"),
                block_rule("xss_uri", 3, xss_statement(URI_PATH),
                           "xss_attacks"),
                block_rule("xss_header", 4, xss_statement(SINGLE_HEADER),
                           "xss_attacks"),
            ],
        )

        # Create the SQLI rule group (same layout for SQL injection).
        sqli_rule_group = wafv2.CfnRuleGroup(
            self,
            id="SQLI",
            capacity=80,
            scope="REGIONAL",
            visibility_config=group_visibility("sqli_attacks"),
            rules=[
                block_rule("sqli_query_string", 1,
                           sqli_statement(QUERY_STRING), "sqli_attacks"),
                block_rule("sqli_body", 2, sqli_statement(BODY),
                           "sqli_attacks"),
                block_rule("sqli_uri", 3, sqli_statement(URI_PATH),
                           "sqli_attacks"),
                block_rule("sqli_header", 4, sqli_statement(SINGLE_HEADER),
                           "sqli_attacks"),
            ],
        )

        # Create the LFI and path traversal sets.
        # Raw strings: identical runtime values to the original literals,
        # without the invalid "\." escape-sequence warning.
        regex_pattern_set = wafv2.CfnRegexPatternSet(
            self,
            id="Ext01LptSet",
            regular_expression_list=[r".*\.\./.*", r".*://.*"],
            scope="REGIONAL")

        def lpt_statement(field):
            # Regex-pattern-set reference statement inspecting *field*.
            return wafv2.CfnRuleGroup.StatementOneProperty(
                regex_pattern_set_reference_statement=wafv2.CfnRuleGroup.
                RegexPatternSetReferenceStatementProperty(
                    arn=regex_pattern_set.attr_arn,
                    field_to_match=field,
                    text_transformations=[NO_TEXT_TRANSFORMATION]))

        lpt_rule_group = wafv2.CfnRuleGroup(
            self,
            id="LPT",
            capacity=50,
            scope="REGIONAL",
            visibility_config=group_visibility("lpt_attacks"),
            rules=[
                block_rule("lpt_query_string", 1,
                           lpt_statement(QUERY_STRING), "lpt_attacks"),
                block_rule("lpt_uri", 2, lpt_statement(URI_PATH),
                           "lpt_attacks"),
            ],
        )

        # Create new WAF IPSet — starts empty; the response lambda adds
        # offending addresses, the unban lambda removes them.
        blacklist = wafv2.CfnIPSet(
            self,
            "Ext01ResponseIpSet",
            addresses=[],
            ip_address_version="IPV4",
            scope="REGIONAL",
            name="Ext01ResponseIpSet",
        )

        def group_ref_rule(name, priority, rule_group):
            # Web-ACL rule referencing a rule group; override_action=none
            # keeps the group's own (block) rule actions.
            ref = wafv2.CfnWebACL.RuleGroupReferenceStatementProperty(
                arn=rule_group.attr_arn)
            return wafv2.CfnWebACL.RuleProperty(
                name=name,
                priority=priority,
                statement=wafv2.CfnWebACL.StatementOneProperty(
                    rule_group_reference_statement=ref),
                visibility_config=wafv2.CfnWebACL.VisibilityConfigProperty(
                    cloud_watch_metrics_enabled=False,
                    metric_name=f"{name.lower()}_requests",
                    sampled_requests_enabled=False),
                override_action=wafv2.CfnWebACL.OverrideActionProperty(
                    none={}),
            )

        # Create a WAF web ACL combining the three rule groups; requests
        # matching nothing are allowed by default.
        waf = wafv2.CfnWebACL(
            self,
            id="Ext01_WAF",
            name="Ext01-WAF",
            default_action=wafv2.CfnWebACL.DefaultActionProperty(allow={}),
            scope="REGIONAL",
            visibility_config=wafv2.CfnWebACL.VisibilityConfigProperty(
                cloud_watch_metrics_enabled=True,
                metric_name="EXT01_WAF",
                sampled_requests_enabled=True),
            rules=[
                group_ref_rule("SQLI", 2, sqli_rule_group),
                group_ref_rule("XSS", 3, xss_rule_group),
                group_ref_rule("LPT", 4, lpt_rule_group),
            ],
        )

        # Create unban lambda; removes an address from the IPSet again.
        lambda_dir_path = os.path.join(os.getcwd(), "ir_cdk_stacks",
                                       "ext_01")
        unban_lambda = _lambda.Function(
            self,
            "Ext01ResponseUnbanFunction",
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler="unban_lambda.lambda_handler",
            code=_lambda.Code.from_asset(lambda_dir_path),
            environment={
                "ipset_id": blacklist.attr_id,
                "ipset_name": blacklist.name,
                "ipset_scope": blacklist.scope,
            })
        # Assign WAF permissions to lambda, scoped to the IPSet only.
        unban_lambda.add_to_role_policy(
            iam.PolicyStatement(
                actions=["wafv2:GetIPSet", "wafv2:UpdateIPSet"],
                effect=iam.Effect.ALLOW,
                resources=[blacklist.attr_arn],
            ))

        # Create stepfunction
        # Define a second state machine to unban the blacklisted IP after 1 hour
        wait_step = sfn.Wait(
            self,
            "Ext01ResponseStepWait",
            time=sfn.WaitTime.duration(core.Duration.hours(1)),
        )
        unban_step = sfn.Task(
            self,
            "Ext01ResponseStepUnban",
            task=tasks.RunLambdaTask(
                unban_lambda,
                integration_pattern=sfn.ServiceIntegrationPattern.
                FIRE_AND_FORGET,
                payload={"Input.$": "$"}),
        )
        # 1.5 h execution timeout leaves slack beyond the 1 h wait state.
        statemachine = sfn.StateMachine(
            self,
            "Ext01ResponseUnbanStateMachine",
            definition=wait_step.next(unban_step),
            timeout=core.Duration.hours(1.5),
        )

        # Create lambda function that bans IPs found in the delivered WAF
        # logs and schedules the delayed unban via the state machine.
        lambda_func = _lambda.Function(
            self,
            "Ext01ResponseFunction",
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler="response_lambda.lambda_handler",
            code=_lambda.Code.from_asset(lambda_dir_path),
            environment={
                "ipset_id": blacklist.attr_id,
                "ipset_name": blacklist.name,
                "ipset_scope": blacklist.scope,
                "sfn_arn": statemachine.state_machine_arn,
            },
        )

        # Log-landing bucket for the Firehose delivery stream.
        # SECURITY FIX: was BucketAccessControl.PUBLIC_READ_WRITE, which
        # exposed the WAF logs to the world. Nothing needs public access:
        # Firehose writes via stream_role below, the response lambda reads
        # via its own policy.
        kinesis_log = s3.Bucket(
            self,
            id='waf_logs',
            access_control=s3.BucketAccessControl.PRIVATE,
        )

        # Assign permissions to response lambda (duplicate bucket-ARN
        # entries from the original list removed).
        lambda_func.add_to_role_policy(
            iam.PolicyStatement(
                actions=[
                    "wafv2:GetIPSet",
                    "wafv2:UpdateIPSet",
                    "states:StartExecution",
                    "s3:GetObject",
                ],
                effect=iam.Effect.ALLOW,
                resources=[
                    blacklist.attr_arn,
                    statemachine.state_machine_arn,
                    kinesis_log.bucket_arn,
                    kinesis_log.bucket_arn + "/*",
                ],
            ))

        # Create an IAM role for the stream, assumed by Firehose.
        stream_role = iam.Role(
            self,
            id="waf-kinesis-log-role",
            assumed_by=iam.ServicePrincipal(
                service="firehose.amazonaws.com", ),
        )
        stream_permissions = iam.Policy(
            self,
            id="Ext-01-kinesis-permissions",
            statements=[
                iam.PolicyStatement(
                    actions=[
                        "s3:AbortMultipartUpload",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:ListBucket",
                        "s3:ListBucketMultipartUploads",
                        "s3:PutObject",
                    ],
                    effect=iam.Effect.ALLOW,
                    resources=[
                        kinesis_log.bucket_arn,
                        kinesis_log.bucket_arn + "/*",
                    ],
                )
            ])
        stream_role.attach_inline_policy(stream_permissions)

        # Firehose delivery stream for WAF logs — presumably named with the
        # "aws-waf-logs-" prefix because WAF logging requires it; confirm
        # before renaming.
        log_stream = firehose.CfnDeliveryStream(
            self,
            id="aws-waf-logs-ext01",
            delivery_stream_type="DirectPut",
            delivery_stream_name="aws-waf-logs-ext01",
            s3_destination_configuration=firehose.CfnDeliveryStream.
            S3DestinationConfigurationProperty(
                bucket_arn=kinesis_log.bucket_arn,
                buffering_hints=firehose.CfnDeliveryStream.
                BufferingHintsProperty(interval_in_seconds=120,
                                       size_in_m_bs=5),
                compression_format="UNCOMPRESSED",
                role_arn=stream_role.role_arn),
        )
        # Run the response lambda whenever a new log object lands.
        kinesis_log.add_event_notification(
            s3.EventType.OBJECT_CREATED,
            dest=s3_notifications.LambdaDestination(lambda_func))

        # One-shot trigger ~5 minutes after deployment: a cron expression
        # pinned to an absolute minute/hour/day/month/year.
        utc_time = datetime.now(tz=timezone.utc) + timedelta(minutes=5)
        cron_string = (f"cron({utc_time.minute} {utc_time.hour} "
                       f"{utc_time.day} {utc_time.month} ? {utc_time.year})")
        trigger = events.Rule(
            self,
            id="ext-01 setup",
            rule_name="Ext01-trigger",
            schedule=events.Schedule.expression(cron_string))

        # Setup lambda: receives the WAF, Firehose and IPSet identifiers so
        # it can put the WAF logging configuration once the stack is live.
        setup_dir_path = os.path.join(os.getcwd(), "ir_cdk_stacks",
                                      "ext_01")
        setup_func = _lambda.Function(
            self,
            id="Ext01Setup",
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler="setup.lambda_handler",
            code=_lambda.Code.from_asset(setup_dir_path),
            environment={
                "waf_arn": waf.attr_arn,
                "waf_id": waf.attr_id,
                "waf_scope": waf.scope,
                "waf_name": waf.name,
                "firehose_arn": log_stream.attr_arn,
                "rule_name": "Ext01-trigger",
                "blacklist_arn": blacklist.attr_arn,
            },
        )
        # Assign permissions to setup lambda
        setup_func.add_to_role_policy(
            iam.PolicyStatement(
                actions=[
                    "wafv2:PutLoggingConfiguration",
                    "wafv2:GetWebACL",
                    "wafv2:UpdateWebACL",
                ],
                effect=iam.Effect.ALLOW,
                resources=[
                    waf.attr_arn,
                    blacklist.attr_arn,
                    xss_rule_group.attr_arn,
                    sqli_rule_group.attr_arn,
                    lpt_rule_group.attr_arn,
                ],
            ))

        setup = targets.LambdaFunction(handler=setup_func, )
        # NOTE(review): add_target() also calls bind() internally, so the
        # explicit bind() looks redundant — confirm before removing.
        setup.bind(rule=trigger)
        trigger.add_target(target=setup)

        # Attach the web ACL to the API gateway given by context.
        wafv2.CfnWebACLAssociation(
            self,
            id="API gateway association",
            resource_arn=API_ARN,
            web_acl_arn=waf.attr_arn,
        )