Example #1
    def __init__(self, scope: core.Construct, **kwargs) -> None:
        self.deploy_env = active_environment
        super().__init__(scope, id=f'{self.deploy_env.value}-data-lake-stack', **kwargs)

        self.data_lake_raw_bucket = BaseDataLakeBucket(
            self,
            deploy_env=self.deploy_env,
            layer=DataLakeLayer.RAW
        )

        self.data_lake_raw_bucket.add_lifecycle_rule(
            transitions=[
                s3.Transition(
                    storage_class=s3.StorageClass.INTELLIGENT_TIERING,
                    transition_after=core.Duration.days(90)
                ),
                s3.Transition(
                    storage_class=s3.StorageClass.GLACIER,
                    transition_after=core.Duration.days(360)
                ),
            ]
        )

        self.data_lake_processed_bucket = BaseDataLakeBucket(
            self,
            deploy_env=self.deploy_env,
            layer=DataLakeLayer.PROCESSED
        )

        self.data_lake_curated_bucket = BaseDataLakeBucket(
            self,
            deploy_env=self.deploy_env,
            layer=DataLakeLayer.CURATED
        )
    def get_data_lake_raw(self):
        bucket = DataLakeBucket(self, environment=self.env, layer=Layer.RAW)
        bucket.add_lifecycle_rule(
            transitions=[
                s3.Transition(storage_class=s3.StorageClass.INTELLIGENT_TIERING,
                              transition_after=core.Duration.days(90)),
                s3.Transition(storage_class=s3.StorageClass.GLACIER,
                              transition_after=core.Duration.days(360))
            ],
            enabled=True
        )

        database = DataLakeDatabase(self, bucket=bucket)

        return bucket, database
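For context, a stack like the one above is typically instantiated from a CDK app entry point. The following is a minimal sketch, assuming a DataLakeStack class that wraps the constructor shown above (the class name and import path are illustrative, not part of the original example):

# Hypothetical app.py entry point -- a sketch only; DataLakeStack and its
# import path are assumptions.
from aws_cdk import core
from data_lake_stack import DataLakeStack

app = core.App()
DataLakeStack(app)   # the stack derives its id from the active environment
app.synth()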
Example #3
  def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    ZachS3Bucket = s3.Bucket(self, 'ZachS3Bucket', versioned=False,
                             removal_policy=core.RemovalPolicy.DESTROY,
                             # block_public_access=s3.BlockPublicAccess.BLOCK_ALL
                             )
    ZachS3BucketAllowMethod = [s3.HttpMethods.GET, s3.HttpMethods.POST]
    # NOTE: CORS allowed_origins expects web origins such as 'https://example.com' or '*', not CIDR ranges
    ZachS3Bucket.add_cors_rule(allowed_methods=ZachS3BucketAllowMethod,
                               allowed_origins=['192.168.0.0/24'])
    ZachS3BucketLongTermStorageClass = s3.StorageClass.INFREQUENT_ACCESS
    ZachS3Bucket.add_lifecycle_rule(enabled=True,
                                    expiration=core.Duration.days(42),
                                    prefix="Zach-tsjr",
                                    tag_filters={"Environment": "Dev", "FileType": "Log"},
                                    transitions=[s3.Transition(storage_class=ZachS3BucketLongTermStorageClass,
                                                               transition_after=core.Duration.days(30))]
                                    )
    s3_logger_handler = lm.Function(
      self, "ZachS3BucketLambda",
      runtime=lm.Runtime.PYTHON_3_7,
      handler="s3recorder.handler",
      code=lm.Code.asset('aws_cdk_python/Zach_Lambda_Stack'),  # forward slashes keep the asset path portable
      environment={
        "S3_BUCKET": ZachS3Bucket.bucket_name
      }
    )
    notify_lambda = s3n.LambdaDestination(s3_logger_handler)
    ZachS3Bucket.add_event_notification(s3.EventType.OBJECT_CREATED, notify_lambda)
    # ZachS3Bucket.grant_write(notify_lambda)
    core.CfnOutput(self, id="ZachS3BucketARN", value=ZachS3Bucket.bucket_arn)
    core.CfnOutput(self, id="ZachS3BucketName", value=ZachS3Bucket.bucket_name)
    core.CfnOutput(self, id="ZachS3BucketOverview", value=ZachS3Bucket.to_string())
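The s3recorder.handler code for ZachS3BucketLambda is not included in the example above. A minimal sketch of what such a handler could look like, assuming it only logs the objects that triggered the OBJECT_CREATED notification (the S3_BUCKET variable and the handler name come from the stack; everything else is illustrative):

# Hypothetical s3recorder.py -- a sketch only, not the original handler code.
import json
import os

def handler(event, context):
    bucket = os.environ.get("S3_BUCKET")        # injected by the stack above
    records = event.get("Records", [])          # S3 OBJECT_CREATED notifications
    for record in records:
        key = record["s3"]["object"]["key"]
        print(json.dumps({"bucket": bucket, "key": key}))
    return {"recorded": len(records)}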
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # create s3 bucket for athena data
        self.s3_bucket = s3.Bucket(
            self,
            "s3_bucket",
            public_read_access=False,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
            removal_policy=core.RemovalPolicy.DESTROY,
            lifecycle_rules=[
                s3.LifecycleRule(
                    # delete the files after 1800 days (5 years)
                    expiration=core.Duration.days(1800),
                    transitions=[
                        # move files into glacier after 90 days
                        s3.Transition(
                            transition_after=core.Duration.days(90),
                            storage_class=s3.StorageClass.GLACIER,
                        )
                    ],
                )
            ],
        )
        # tag the bucket
        core.Tag.add(self.s3_bucket, "project", constants["PROJECT_TAG"])

        # lambda policies
        athena_bucket_empty_policy = [
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=["s3:ListBucket"],
                resources=["*"],
            ),
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "s3:DeleteObject",
                ],
                resources=[f"{self.s3_bucket.bucket_arn}/*"],
            ),
        ]

        # create the custom resource
        athena_bucket_empty = CustomResource(
            self,
            "athena_bucket_empty",
            PhysicalId="athenaBucketEmpty",
            Description="Empty athena s3 bucket",
            Uuid="f7d4f730-4ee1-11e8-9c2d-fa7ae01bbebc",
            HandlerPath=os.path.join(dirname, "../helpers/s3_bucket_empty.py"),
            BucketName=self.s3_bucket.bucket_name,
            ResourcePolicies=athena_bucket_empty_policy,
        )
        # needs a dependency on the bucket
        athena_bucket_empty.node.add_dependency(self.s3_bucket)
    def __init__(self, scope: core.Construct, id: str, bucket_name: str,
                 uuid: str, **kwargs):
        super().__init__(scope, id, **kwargs)

        bucket_storage = _s3.LifecycleRule(transitions=[
            _s3.Transition(storage_class=_s3.StorageClass.INTELLIGENT_TIERING,
                           transition_after=core.Duration.days(1))
        ])

        self.__bucket = _s3.Bucket(self,
                                   'S3Bucket',
                                   bucket_name=bucket_name,
                                   removal_policy=core.RemovalPolicy.DESTROY,
                                   encryption=_s3.BucketEncryption.KMS_MANAGED,
                                   lifecycle_rules=[bucket_storage])

        with open('common/common_cdk/lambda/empty_bucket.py', 'r') as f:
            lambda_source = f.read()

        empty_bucket_lambda = _lambda.SingletonFunction(
            self,
            'EmptyBucketLambda',
            uuid=uuid,
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.inline(lambda_source),
            handler='index.handler',
            timeout=core.Duration.minutes(15))

        empty_bucket_lambda.role.add_to_policy(
            _iam.PolicyStatement(actions=[
                's3:DeleteObject', 's3:DeleteObjectVersion',
                's3:ListBucketVersions', 's3:ListBucket'
            ],
                                 resources=[
                                     self.__bucket.bucket_arn + '/*',
                                     self.__bucket.bucket_arn
                                 ]))

        empty_bucket_lambda_provider = _custom_resources.Provider(
            self,
            'EmptyBucketLambdaProvider',
            on_event_handler=empty_bucket_lambda)

        custom_resource = core.CustomResource(
            self,
            'EmptyBucketCustomResource',
            service_token=empty_bucket_lambda_provider.service_token,
            properties={"bucket_name": self.__bucket.bucket_name})

        custom_resource.node.add_dependency(self.__bucket)
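The inlined common/common_cdk/lambda/empty_bucket.py source is not shown above. A minimal sketch of a Provider-framework on_event handler that could back this custom resource, assuming its only job is to drain the bucket when the stack is deleted (the bucket_name property matches the stack; the rest is illustrative):

# Hypothetical empty_bucket.py -- a sketch only, not the original inline source.
import boto3

def handler(event, context):
    # The Provider framework forwards the CloudFormation lifecycle event;
    # only Delete needs work, since a bucket must be empty before removal.
    if event["RequestType"] == "Delete":
        bucket_name = event["ResourceProperties"]["bucket_name"]
        bucket = boto3.resource("s3").Bucket(bucket_name)
        bucket.object_versions.delete()   # delete versions and delete markers
        bucket.objects.all().delete()     # delete any remaining current objects
    return {"PhysicalResourceId": event.get("PhysicalResourceId", "EmptyBucketResource")}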
Example #6
    def __init__(self, scope: core.Construct, id: str, sample_data_path: str,
                 bucket_name: str, **kwargs) -> None:

        super().__init__(scope, id, **kwargs)

        # Creates a sample S3 bucket with a lifecycle rule
        sample_bucket = s3.Bucket(
            self,
            f'{id}.s3.bucket',
            bucket_name=bucket_name,
            lifecycle_rules=[
                s3.LifecycleRule(
                    enabled=True,
                    id=f'{id}.s3.lifecyclerule',
                    transitions=[
                        s3.Transition(
                            storage_class=s3.StorageClass.INTELLIGENT_TIERING,

                            # Note that this transition may take 24-48 hours, but you will be charged as if the transition happens instantly.
                            transition_after=core.Duration.days(1),

                            # You can also set the transition time to a specific date:
                            #transition_date=datetime.datetime(
                            #	year=2021,
                            #	month=5,
                            #	day=17,
                            #	tzinfo=pytz.timezone("US/Eastern")
                            #),
                        )
                    ])
            ],
            removal_policy=core.RemovalPolicy.DESTROY)

        # Uploads some sample data to the S3 bucket for testing storage class transitions
        s3_deployment.BucketDeployment(
            self,
            f'{id}.s3deployment.sampledata',
            destination_bucket=sample_bucket,
            sources=[s3_deployment.Source.asset(path=sample_data_path)])
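After the sample data has been deployed, the lifecycle transition can be verified per object. A minimal sketch using boto3 head_object, with placeholder bucket and key values (S3 only reports StorageClass for non-STANDARD objects):

# A sketch for spot-checking a storage class transition; bucket and key are placeholders.
import boto3

s3_client = boto3.client("s3")
response = s3_client.head_object(Bucket="my-sample-bucket", Key="sample-file.csv")
print(response.get("StorageClass", "STANDARD"))   # e.g. INTELLIGENT_TIERING after the transition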
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # load configs from "./configurations/config.json"
        configs = {}
        with open("./configurations/config.json") as json_file:
            configs = json.load(json_file)

        # Default lambdas for testing
        mem_list = configs['MemorySizeList']
        cold_start_lambdas = {}
        for mem in mem_list:
            python38_lambda = lambda_.Function(
                self,
                id="coldstart_python38_" + str(mem) + "_",
                runtime=lambda_.Runtime.PYTHON_3_8,
                handler="lambda_function.lambda_handler",
                memory_size=mem,
                tracing=lambda_.Tracing.ACTIVE,
                code=lambda_.Code.asset("./cold_start_lambdas/python38"))
            cold_start_lambdas['PYTHON38_' + str(mem)] = python38_lambda

        for mem in mem_list:
            nodejs12x_lambda = lambda_.Function(
                self,
                id="coldstart_nodejs12x" + str(mem) + "_",
                runtime=lambda_.Runtime.NODEJS_12_X,
                handler="index.handler",
                memory_size=mem,
                tracing=lambda_.Tracing.ACTIVE,
                code=lambda_.Code.asset("./cold_start_lambdas/nodejs12x"))
            cold_start_lambdas['NODEJS12X_' + str(mem)] = nodejs12x_lambda

        for mem in mem_list:
            go1x_lambda = lambda_.Function(
                self,
                id="coldstart_go1x" + str(mem) + "_",
                runtime=lambda_.Runtime.GO_1_X,
                handler="hello",
                memory_size=mem,
                tracing=lambda_.Tracing.ACTIVE,
                code=lambda_.Code.asset("./cold_start_lambdas/go1x"))
            cold_start_lambdas['GO1X_' + str(mem)] = go1x_lambda

        for mem in mem_list:
            netcore31_lambda = lambda_.Function(
                self,
                id="coldstart_netcore31" + str(mem) + "_",
                runtime=lambda_.Runtime.DOTNET_CORE_3_1,
                handler="LambdaTest::LambdaTest.LambdaHandler::handleRequest",
                tracing=lambda_.Tracing.ACTIVE,
                code=lambda_.Code.asset("./cold_start_lambdas/netcore31"),
                memory_size=mem,
            )
            cold_start_lambdas['NETCORE31_' + str(mem)] = netcore31_lambda

        for mem in mem_list:
            java11corretto_lambda = lambda_.Function(
                self,
                id="coldstart_java11corretto" + str(mem) + "_",
                runtime=lambda_.Runtime.JAVA_11,
                handler="example.Hello::handleRequest",
                memory_size=mem,
                tracing=lambda_.Tracing.ACTIVE,
                code=lambda_.Code.asset("./cold_start_lambdas/java11corretto"))
            cold_start_lambdas['JAVA11_' + str(mem)] = java11corretto_lambda

        for mem in mem_list:
            ruby27_lambda = lambda_.Function(
                self,
                id="coldstart_ruby27" + str(mem) + "_",
                runtime=lambda_.Runtime.RUBY_2_7,
                handler="lambda_function.lambda_handler",
                memory_size=mem,
                tracing=lambda_.Tracing.ACTIVE,
                code=lambda_.Code.asset("./cold_start_lambdas/ruby27"))
            cold_start_lambdas['RUBY27_' + str(mem)] = ruby27_lambda

        # Caller
        cold_start_caller = lambda_.Function(
            self,
            id="cold_start_caller",
            runtime=lambda_.Runtime.PYTHON_3_8,
            handler="ColdStartCaller.lambda_handler",
            code=lambda_.Code.asset("./cold_start_lambdas/cold_start_caller"),
            timeout=core.Duration.seconds(180))
        cold_start_caller.role.add_managed_policy(
            iam_.ManagedPolicy.from_aws_managed_policy_name(
                "AWSXrayReadOnlyAccess"))
        cold_start_caller.role.add_to_policy(
            iam_.PolicyStatement(effect=iam_.Effect.ALLOW,
                                 actions=['lambda:GetFunctionConfiguration'],
                                 resources=["*"]))
        for lambda_name in cold_start_lambdas:
            cold_start_caller.add_environment(
                lambda_name, cold_start_lambdas[lambda_name].function_arn)
            cold_start_lambdas[lambda_name].grant_invoke(cold_start_caller)

        # DynamoDB
        cold_start_table = dynamodb_.Table(
            self,
            id="cold_start_benchmark_table",
            partition_key=dynamodb_.Attribute(
                name="PK", type=dynamodb_.AttributeType.STRING),
            sort_key=dynamodb_.Attribute(name="SK",
                                         type=dynamodb_.AttributeType.NUMBER),
            time_to_live_attribute="TTL")
        cold_start_table.grant_write_data(cold_start_caller)
        cold_start_caller.add_environment('TABLE_NAME',
                                          cold_start_table.table_name)

        # S3
        life_cycle_rule = s3_.LifecycleRule(transitions=[
            s3_.Transition(storage_class=s3_.StorageClass.INFREQUENT_ACCESS,
                           transition_after=core.Duration.days(30))
        ])
        cold_start_backup_s3 = s3_.Bucket(self,
                                          "cold_start_benchmark_backup",
                                          lifecycle_rules=[life_cycle_rule])
        cold_start_backup_s3.grant_write(cold_start_caller)
        cold_start_caller.add_environment('BACKUP_BUCKET_NAME',
                                          cold_start_backup_s3.bucket_name)

        # CW event
        cron_job = events_.Rule(
            self,
            "cold_start_caller_cron_job",
            description="Run cold start caller twice every 1 hour",
            schedule=events_.Schedule.cron(minute="0,1"),
            targets=[targets_.LambdaFunction(cold_start_caller)])

        # Alarm when the caller fails; send an email notification
        errorAlarm = cloudwatch_.Alarm(
            self,
            "cold_start_caller_error_alarm",
            metric=cloudwatch_.Metric(
                metric_name="Errors",
                namespace="AWS/Lambda",
                period=core.Duration.minutes(5),
                statistic="Maximum",
                dimensions={"FunctionName": cold_start_caller.function_name}),
            evaluation_periods=1,
            datapoints_to_alarm=1,
            threshold=1,
            actions_enabled=True,
            alarm_description="Alarm when cold start caller failed",
            alarm_name="cold_start_caller_errer_alarm",
            comparison_operator=cloudwatch_.ComparisonOperator.
            GREATER_THAN_OR_EQUAL_TO_THRESHOLD,
            treat_missing_data=cloudwatch_.TreatMissingData.MISSING)
        cold_start_caller_error_alarm_topic = sns_.Topic(
            self,
            "cold_start_caller_error_alarm_topic",
            display_name="ColdStartCallerErrorAlarmTopic",
            topic_name="ColdStartCallerErrorAlarmTopic")
        cold_start_caller_error_alarm_topic.add_subscription(
            sns_subs_.EmailSubscription(
                configs['AlarmNotificationEmailAddress']))
        errorAlarm.add_alarm_action(
            cloudwatch_actions_.SnsAction(cold_start_caller_error_alarm_topic))

        # Summarizer
        cold_start_summarizer = lambda_.Function(
            self,
            id="cold_start_summarizer",
            runtime=lambda_.Runtime.PYTHON_3_8,
            handler="ColdStartSummarizer.lambda_handler",
            code=lambda_.Code.asset(
                "./cold_start_lambdas/cold_start_summarizer"),
            timeout=core.Duration.seconds(10))
        cold_start_table.grant_read_write_data(cold_start_summarizer)
        cold_start_summarizer.add_environment('TABLE_NAME',
                                              cold_start_table.table_name)

        # set up CW event for summarizer
        cron_job_summarizer = events_.Rule(
            self,
            "cold_start_summarizer_cron_job",
            description="Run cold start summarizer once every day",
            schedule=events_.Schedule.cron(minute='30', hour='0'),
            targets=[targets_.LambdaFunction(cold_start_summarizer)])

        # error alarm for summarizer
        errorAlarm_summarizer = cloudwatch_.Alarm(
            self,
            "cold_start_summarizer_error_alarm",
            metric=cloudwatch_.Metric(metric_name='Errors',
                                      namespace='AWS/Lambda',
                                      period=core.Duration.minutes(5),
                                      statistic='Maximum',
                                      dimensions={
                                          'FunctionName':
                                          cold_start_summarizer.function_name
                                      }),
            evaluation_periods=1,
            datapoints_to_alarm=1,
            threshold=1,
            actions_enabled=True,
            alarm_description="Alarm when cold start summarizer failed",
            alarm_name="cold_start_summarizer_errer_alarm",
            comparison_operator=cloudwatch_.ComparisonOperator.
            GREATER_THAN_OR_EQUAL_TO_THRESHOLD,
            treat_missing_data=cloudwatch_.TreatMissingData.MISSING)
        cold_start_summarizer_error_alarm_topic = sns_.Topic(
            self,
            "cold_start_summarizer_error_alarm_topic",
            display_name="ColdStartSummarizerErrorAlarmTopic",
            topic_name="ColdStartSummarizerErrorAlarmTopic")
        cold_start_summarizer_error_alarm_topic.add_subscription(
            sns_subs_.EmailSubscription(
                configs['AlarmNotificationEmailAddress']))
        errorAlarm_summarizer.add_alarm_action(
            cloudwatch_actions_.SnsAction(
                cold_start_summarizer_error_alarm_topic))

        # GraphQL API
        graphql_api = appsync_.GraphqlApi(
            self,
            "cold_start_benchmark_graphql_api",
            name="cold_start_benchmark_graphql_api",
            authorization_config=appsync_.AuthorizationConfig(
                default_authorization=appsync_.AuthorizationMode(
                    authorization_type=appsync_.AuthorizationType.API_KEY,
                    api_key_config=appsync_.ApiKeyConfig(
                        description="cold_start_benchmark_graphql_api_key",
                        expires=core.Expiration.after(core.Duration.days(365)),
                        name="cold_start_benchmark_graphql_api_key"))),
            schema=appsync_.Schema.from_asset(
                './cold_start_benchmark/graphql_schema/schema.graphql'),
            xray_enabled=True)
        dynamodb_data_source = graphql_api.add_dynamo_db_data_source(
            id="cold_start_dynamodb_data_source", table=cold_start_table)
        dynamodb_data_source.create_resolver(
            field_name="listColdStartSummariesAfterTimestamp",
            type_name="Query",
            request_mapping_template=appsync_.MappingTemplate.from_file(
                './cold_start_benchmark/graphql_schema/request_mapping_template'
            ),
            response_mapping_template=appsync_.MappingTemplate.from_file(
                './cold_start_benchmark/graphql_schema/response_mapping_template'
            ))

        front_end_amplify_app = amplify_.App(
            self,
            "cold-start-front-end",
            app_name="cold_start_front_end",
            source_code_provider=amplify_.GitHubSourceCodeProvider(
                owner="ZzzGin",
                repository="cold-start-frontend-website",
                oauth_token=core.SecretValue.secrets_manager(
                    "zzzgin/github/token", json_field="zzzgin-github-token")))
        master_Branch = front_end_amplify_app.add_branch("master")
        domain = front_end_amplify_app.add_domain('zzzgin.com')
        domain.map_sub_domain(master_Branch, 'coldstart')
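The stack above reads only two keys from ./configurations/config.json. A minimal sketch of that file's expected shape, inferred from the configs lookups in the code (the concrete values are illustrative):

# Illustrative config generator -- the keys match the lookups above; the values are examples only.
import json

config = {
    "MemorySizeList": [128, 256, 512, 1024],
    "AlarmNotificationEmailAddress": "you@example.com",
}

with open("./configurations/config.json", "w") as f:
    json.dump(config, f, indent=2)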
    def __init__(self, scope: core.Construct, id: str,
                 infra: RtspBaseResourcesConstruct, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create the inventory bucket...
        self.inventories = s3.Bucket(
            self,
            'InventoryBucket',
            bucket_name='homenet-{}.rtsp-inventories.{}.virtual.world'.format(
                infra.landing_zone.zone_name,
                core.Stack.of(self).region).lower(),
            removal_policy=core.RemovalPolicy.DESTROY,
            cors=[
                s3.CorsRule(allowed_methods=[s3.HttpMethods.GET],
                            allowed_origins=['*'])
            ],
            lifecycle_rules=[
                s3.LifecycleRule(
                    id='Transition-to-IA-after-30D',
                    prefix='eufy/',
                    abort_incomplete_multipart_upload_after=core.Duration.days(
                        7),
                    transitions=[
                        s3.Transition(
                            storage_class=s3.StorageClass.INFREQUENT_ACCESS,
                            transition_after=core.Duration.days(30))
                    ])
            ])

        # Create inventory collections for the Eufy Homebases...
        infra.bucket.add_inventory(
            objects_prefix='eufy/',
            inventory_id='{}-InventoryReport'.format('EufyFull'),
            format=s3.InventoryFormat.CSV,
            frequency=s3.InventoryFrequency.DAILY,
            include_object_versions=s3.InventoryObjectVersion.CURRENT,
            destination=s3.InventoryDestination(
                bucket=self.inventories,
                bucket_owner=core.Aws.ACCOUNT_ID,
                prefix=None))

        for base_name in ['Moonbase', 'Starbase']:
            prefix = 'eufy/{}.cameras.real.world/'.format(base_name).lower()
            infra.bucket.add_inventory(
                objects_prefix=prefix,
                inventory_id='{}-InventoryReport'.format(base_name),
                format=s3.InventoryFormat.CSV,
                frequency=s3.InventoryFrequency.DAILY,
                include_object_versions=s3.InventoryObjectVersion.CURRENT,
                destination=s3.InventoryDestination(
                    bucket=self.inventories,
                    bucket_owner=core.Aws.ACCOUNT_ID,
                    prefix=None))

        # Broadcast inventory creation events...
        self.inventoryAvailable = sns.Topic(
            self,
            'InventoryAvailable',
            display_name='HomeNet-{}-Rtsp-InventoryAvailable'.format(
                infra.landing_zone.zone_name),
            topic_name='HomeNet-{}-Rtsp-InventoryAvailable'.format(
                infra.landing_zone.zone_name))

        self.inventories.add_event_notification(
            s3.EventType.OBJECT_CREATED,
            s3n.SnsDestination(topic=self.inventoryAvailable),
            s3.NotificationKeyFilter(suffix='manifest.json'))

        # Attach debug queue to the notification
        self.inventoryAvailable.add_subscription(
            subs.SqsSubscription(
                raw_message_delivery=True,
                queue=sqs.Queue(
                    self,
                    'InventoryDebugQueue',
                    removal_policy=core.RemovalPolicy.DESTROY,
                    retention_period=core.Duration.days(7),
                    queue_name='HomeNet-{}-RtspInventoryAvailable_Debug'.
                    format(infra.landing_zone.zone_name))))

        # Subscribe the GroundTruth Manifest Generator
        groundtruth = RtspGroundTruthManifestGenerationFunction(
            self,
            'GroundTruthManifest',
            infra=infra,
            topic=self.inventoryAvailable)

        self.inventories.grant_read_write(groundtruth.function.role)

        # Create the RtspNormalizeImage S3 Object Lambda
        RtspNormalizeImageAccessPoint(scope=self,
                                      id='NormalizedImage',
                                      infra=infra)
 def __init__(self, scope: core.Construct, id: str, UserName="******", EmailAddress="default", **kwargs) -> None:
     super().__init__(scope, id, **kwargs)
     
     # vpc with one public subnet and one private subnet
     self.My_Vpc = _ec2.Vpc(self, "vpc-"+ UserName + "-batch",
         max_azs=2,
         nat_gateways=1,
         subnet_configuration=[
             _ec2.SubnetConfiguration(
                 subnet_type=_ec2.SubnetType.PUBLIC,
                 name="BatchENV",
                 cidr_mask=24
             ),
             _ec2.SubnetConfiguration(
                 cidr_mask=24,
                 name="InternalENV",
                 subnet_type=_ec2.SubnetType.PRIVATE
             )
         ]
     )
     
     # Definition Of S3 Bucket For Batch Computing
     self.My_S3_Bucket = _s3.Bucket(self,
         "s3bucket-" + UserName + "-batch",
         lifecycle_rules=[
             _s3.LifecycleRule(
                  # delete the files after 365 days (1 year)
                 expiration=core.Duration.days(365),
                 transitions=[
                      # move files into Glacier after 30 days, then Deep Archive after 120 days
                     _s3.Transition(
                         transition_after=core.Duration.days(30),
                         storage_class=_s3.StorageClass.GLACIER
                     ),
                     _s3.Transition(
                         transition_after=core.Duration.days(120),
                         storage_class=_s3.StorageClass.DEEP_ARCHIVE
                     )
                 ],
             )
         ],
         removal_policy=core.RemovalPolicy.DESTROY
     )
     
     # Definition Of ECR Repo
     self.My_ECR_Repo = EcrENV(self,
         "ecr-" + UserName + "-batch",
         UserName=UserName
     )
     
     # Definition Of Batch ENV For Batch Computing
     self.My_Batch = BatchENV(self,
         "env-" + UserName + "-batch",
         CurrentVPC=self.My_Vpc,
         TargetS3=self.My_S3_Bucket,
         UserName=UserName
         
     )
     
     # Definition Of Batch Job 
     self.My_Batch_Task = BatchTASK(self,
         "task-" + UserName + "-batch",
         EcrRepo=self.My_ECR_Repo,
         UserName=UserName
     )
     
     # Definition Of Lambda Job 
     self.My_Lambda_Task = LambdaTask(self,
         "task-" + UserName + "-lambda",
         TargetS3=self.My_S3_Bucket
     )
     
     # Definition Of SNS Topic With Subscription 
     self.My_SNS = SnsENV(self,
         "sns-" + UserName + "-sfn",
         UserName=UserName,
         EmailAddress=EmailAddress
     )
     
     # Definition Of State Machine In Step functions  
     self.My_SFN = StepfunctionsENV(self,
         "statemachine-" + UserName + "-sfn",
         QueueDefine = self.My_Batch,
         TaskDefine = self.My_Batch_Task,
         LambdaDefine = self.My_Lambda_Task,
         SNSDefine = self.My_SNS
     )
     
     core.CfnOutput(self,
         "S3 Bucket For AWS Batch",
         value = self.My_S3_Bucket.bucket_name
     )
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id)

        lambda_dir = kwargs["lambda_dir"]

        # Note: A dead-letter queue is optional but it helps capture any failed messages
        dlq = sqs.Queue(self,
                        id="dead_letter_queue_id",
                        retention_period=Duration.days(7))
        dead_letter_queue = sqs.DeadLetterQueue(max_receive_count=1, queue=dlq)

        upload_queue = sqs.Queue(self,
                                 id="sample_queue_id",
                                 visibility_timeout=Duration.seconds(30),
                                 dead_letter_queue=dead_letter_queue)

        sqs_subscription = sns_subs.SqsSubscription(upload_queue,
                                                    raw_message_delivery=True)

        upload_event_topic = sns.Topic(self, id="sample_sns_topic_id")

        # This binds the SNS Topic to the SQS Queue
        upload_event_topic.add_subscription(sqs_subscription)

        # Note: Lifecycle Rules are optional but are included here to keep costs
        #       low by cleaning up old files or moving them to lower cost storage options
        s3_bucket = s3.Bucket(
            self,
            id="sample_bucket_id",
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
            versioned=True,
            lifecycle_rules=[
                s3.LifecycleRule(
                    enabled=True,
                    expiration=Duration.days(365),
                    transitions=[
                        s3.Transition(
                            storage_class=s3.StorageClass.INFREQUENT_ACCESS,
                            transition_after=Duration.days(30)),
                        s3.Transition(storage_class=s3.StorageClass.GLACIER,
                                      transition_after=Duration.days(90)),
                    ])
            ])

        # Note: If you don't specify a filter, all uploads will trigger an event.
        #       Also, modifying the event type will handle other object operations
        # This binds the S3 bucket to the SNS Topic
        s3_bucket.add_event_notification(
            s3.EventType.OBJECT_CREATED_PUT,
            s3n.SnsDestination(upload_event_topic),
            s3.NotificationKeyFilter(prefix="uploads", suffix=".csv"))

        function = _lambda.Function(
            self,
            "lambda_function",
            runtime=_lambda.Runtime.PYTHON_3_9,
            handler="lambda_function.handler",
            code=_lambda.Code.from_asset(path=lambda_dir))

        # This binds the lambda to the SQS Queue
        invoke_event_source = lambda_events.SqsEventSource(upload_queue)
        function.add_event_source(invoke_event_source)

        # Examples of CloudFormation outputs
        CfnOutput(
            self,
            "UploadFileToS3Example",
            value="aws s3 cp <local-path-to-file> s3://{}/".format(
                s3_bucket.bucket_name),
            description=
            "Upload a file to S3 (using AWS CLI) to trigger the SQS chain",
        )
        CfnOutput(
            self,
            "UploadSqsQueueUrl",
            value=upload_queue.queue_url,
            description="Link to the SQS Queue triggered on S3 uploads",
        )
        CfnOutput(
            self,
            "LambdaFunctionName",
            value=function.function_name,
        )
        CfnOutput(
            self,
            "LambdaFunctionLogGroupName",
            value=function.log_group.log_group_name,
        )
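The lambda_function.handler loaded from lambda_dir is not included in the example. A minimal sketch of a handler for this chain, assuming it only logs the uploaded objects; because the SNS subscription uses raw_message_delivery=True, each SQS record body is the original S3 event JSON rather than an SNS envelope:

# Hypothetical lambda_function.py -- a sketch only, not the original asset code.
import json

def handler(event, context):
    for record in event["Records"]:                 # SQS records
        s3_event = json.loads(record["body"])       # raw S3 notification (raw_message_delivery=True)
        for s3_record in s3_event.get("Records", []):
            bucket = s3_record["s3"]["bucket"]["name"]
            key = s3_record["s3"]["object"]["key"]
            print(f"New upload: s3://{bucket}/{key}")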