def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Create the queue
        MySqsQueue = sqs.Queue(self, "MySqsQueue")

        # Create the Topic
        MySnsTopic = sns.Topic(self, "MySnsTopic")

        # Create an SQS subscription object for the topic
        sqsSubscription = snssubs.SqsSubscription(MySqsQueue)

        # Add the SQS subscription to the sns topic
        MySnsTopic.add_subscription(sqsSubscription)

        # Add a policy statement to the SQS queue policy (created as part of the new queue)
        # so that the SNS topic is allowed to send messages to the queue
        MySqsQueue.add_to_resource_policy(
            iam.PolicyStatement(actions=['SQS:SendMessage'],
                                effect=iam.Effect.ALLOW,
                                conditions={'ArnEquals': {'aws:SourceArn': MySnsTopic.topic_arn}},
                                resources=[MySqsQueue.queue_arn],
                                principals=[
                                    iam.ServicePrincipal('sns.amazonaws.com')
                                ]))

        CfnOutput(self, "SQS queue name", description="SQS queue name", value=MySqsQueue.queue_name)
        CfnOutput(self, "SQS queue ARN", description="SQS queue arn", value=MySqsQueue.queue_arn)
        CfnOutput(self, "SQS queue URL", description="SQS queue URL", value=MySqsQueue.queue_url)
        CfnOutput(self, "SNS topic name", description="SNS topic name", value=MySnsTopic.topic_name)
        CfnOutput(self, "SNS topic ARN", description="SNS topic ARN", value=MySnsTopic.topic_arn)
Example 2
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        queue = sqs.Queue(
            self,
            "CbsappQueue",
            visibility_timeout=core.Duration.seconds(300),
        )

        topic = sns.Topic(self, "CbsappTopic")

        table = dynamodb.Table(self,
                               id='dynamoTable',
                               table_name='testcdktable',
                               partition_key=dynamodb.Attribute(
                                   name='lastname',
                                   type=dynamodb.AttributeType.STRING))

        function = _lambda.Function(scope=self,
                                    id='lambdafunction',
                                    function_name='_lambda',
                                    code=_lambda.Code.asset('lambdacode'),
                                    handler='lambdahandler.main',
                                    runtime=_lambda.Runtime.PROVIDED,
                                    memory_size=512,
                                    timeout=core.Duration.seconds(120))
        topic.add_subscription(subs.SqsSubscription(queue))
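This listing, and the other core.Duration-style snippets below, assume CDK v1 imports along these lines (inferred from the aliases used; not shown in the originals):

from aws_cdk import core
from aws_cdk import aws_dynamodb as dynamodb
from aws_cdk import aws_lambda as _lambda
from aws_cdk import aws_sns as sns
from aws_cdk import aws_sns_subscriptions as subs
from aws_cdk import aws_sqs as sqs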
Example 3
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        queue = sqs.Queue(self,
                          "sqsqueue",
                          visibility_timeout=core.Duration.seconds(300))

        topic = sns.Topic(self, "snstopic")

        topic.add_subscription(subs.SqsSubscription(queue))

        bucket = s3.Bucket(
            self,
            "s3Bucket",
            encryption=s3.BucketEncryption.KMS_MANAGED,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
            removal_policy=core.RemovalPolicy.DESTROY,
        )

        bucket.add_event_notification(s3.EventType.OBJECT_CREATED_PUT,
                                      s3n.SnsDestination(topic))

        s3deploy.BucketDeployment(
            self,
            "DeployFile",
            sources=[s3deploy.Source.asset("./assets")],
            destination_bucket=bucket,
            retain_on_delete=False,
        )
Example 4
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        queue = sqs.Queue(
            self,
            "PycdkworkshopQueue",
            visibility_timeout=core.Duration.seconds(300),
        )

        topic = sns.Topic(self, "PycdkworkshopTopic")

        bucket = s3.Bucket(self, id='s3cdkbucket', versioned=True)

        lambdafunction = _lambda.Function(
            self,
            id='lambdafunction',
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler='hello.handler',
            code=_lambda.Code.from_asset(path='lambdacode'))

        lambdaapi = api.LambdaRestApi(self,
                                      id='restapi',
                                      handler=lambdafunction)

        topic.add_subscription(subs.SqsSubscription(queue))
Example 5
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        queue = sqs.Queue(
            self,
            "HelloQueue",
            visibility_timeout=core.Duration.seconds(300),
        )

        topic = sns.Topic(self, "HelloTopic")

        topic.add_subscription(subs.SqsSubscription(queue))
Example 6
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        queue = sqs.Queue(
            self, "%name.PascalCased%Queue",
            visibility_timeout=Duration.seconds(300),
        )

        topic = sns.Topic(
            self, "%name.PascalCased%Topic"
        )

        topic.add_subscription(subs.SqsSubscription(queue))
Example 7
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        queue = sqs.Queue(self,
                          "MyFirstQueue",
                          visibility_timeout=core.Duration.seconds(300))

        topic = sns.Topic(self, "MyFirstTopic", display_name="My First Topic")

        topic.add_subscription(subs.SqsSubscription(queue))

        hello = CloudendureConstruct(self, "MyHelloConstruct", num_buckets=4)
        user = iam.User(self, "MyUser")
        hello.grant_read(user)
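CloudendureConstruct is defined outside this listing; a minimal sketch consistent with how it is used above (assumed behaviour: create num_buckets S3 buckets and expose grant_read; the s3 alias is also an assumption) might look like:

class CloudendureConstruct(core.Construct):
    def __init__(self, scope: core.Construct, id: str, *, num_buckets: int) -> None:
        super().__init__(scope, id)
        # Create the requested number of buckets and keep references for later grants
        self.buckets = [s3.Bucket(self, f"Bucket{i}") for i in range(num_buckets)]

    def grant_read(self, grantee: iam.IGrantable) -> None:
        # Grant the principal read access to every bucket owned by this construct
        for bucket in self.buckets:
            bucket.grant_read(grantee)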
Example 8
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        s3.Bucket(self, "myBucketId")

        queue = sqs.Queue(
            self,
            "MyFirstCdkProjectQueue",
            visibility_timeout=core.Duration.seconds(300),
        )

        topic = sns.Topic(self, "MyFirstCdkProjectTopic")

        topic.add_subscription(subs.SqsSubscription(queue))
Example 9
    def __init__(self, app: cdk.App, id: str, **kwargs) -> None:
        super().__init__(app, id, **kwargs)

        queue = sqs.Queue(
            self,
            "MyFirstQueue",
            visibility_timeout_sec=300,
        )

        topic = sns.Topic(self, "MyFirstTopic", display_name="My First Topic")

        topic.add_subscription(subs.SqsSubscription(queue))

        hello = HelloConstruct(self, "MyHelloConstruct", num_buckets=4)
        user = iam.User(self, "MyUser")
        hello.grant_read(user)
Example 10
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        queue = sqs.Queue(
            self,
            "Lab10Queue",
            visibility_timeout=core.Duration.seconds(300),
        )

        topic = sns.Topic(self, "Lab10Topic")

        topic.add_subscription(subs.SqsSubscription(queue))

        vpc = ec2.Vpc.from_lookup(self,
                                  "aws-cdk-handson-lab02-vpc",
                                  vpc_name="default")

        # The code that defines your stack goes here
        core.CfnOutput(self, "CDKMetadata", value="hello cdk")
Example 11
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Define S3 bucket
        my_bucket = s3.Bucket(self, "ssl-s3-sns-event-raw")

        #Add Filters if required
        filter1 = s3.NotificationKeyFilter(prefix="home/")

        #sns Topic
        my_sns = sns.Topic(self,
                           id="my-sns-topic",
                           display_name="my-sns-topic")

        #Create the s3 notification object that points to the SNS topic
        notification = notifications.SnsDestination(my_sns)

        #link s3 and sns
        my_bucket.add_event_notification(s3.EventType.OBJECT_CREATED,
                                         notification, filter1)

        #create sqs queue
        my_sqs = sqs.Queue(self, id="my-queue")

        #create sqs / sns subscription
        subscription = aws_sns_subscriptions.SqsSubscription(my_sqs)

        #add subscription to sns.
        my_sns.add_subscription(subscription)

        #create lambda function
        my_lambda = _lambda.Function(self,
                                     "HelloHandler",
                                     runtime=_lambda.Runtime.PYTHON_3_7,
                                     handler="hello.handler",
                                     code=_lambda.Code.asset('lambda'))

        #create sns/lambda subscription
        subscription = aws_sns_subscriptions.LambdaSubscription(my_lambda)

        #add lambda subscription to sns
        my_sns.add_subscription(subscription)
Example 12
    def __init__(self, scope: core.Construct, id: str, queue_name,
                 **kwargs) -> None:
        self.name_check = self.check_name_of_queue(queue_name)

        super().__init__(scope, id, **kwargs)

        if self.name_check:

            queue = sqs.Queue(
                self,
                queue_name,
                visibility_timeout=core.Duration.seconds(300),
            )

            topic = sns.Topic(self, "CdkTopic")

            topic.add_subscription(subs.SqsSubscription(queue))
        else:
            print("Check your queue name pls")
            exit(1)
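The check_name_of_queue() helper called above is not shown in this listing; a minimal sketch of such a validator (assumed behaviour: enforce the SQS standard-queue naming rules of 1-80 alphanumeric, hyphen, or underscore characters) could be:

    @staticmethod
    def check_name_of_queue(queue_name: str) -> bool:
        import re
        # Standard (non-FIFO) queue names: 1-80 characters of letters, digits, hyphens, underscores
        return bool(re.fullmatch(r"[A-Za-z0-9_-]{1,80}", queue_name))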
Example 13
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        dispatcher = sns.Topic(self, 'TestDispatcher')
        core.CfnOutput(self,
                       'testDispatcher',
                       value=dispatcher.topic_arn,
                       export_name='testDispatcherArn')

        for queueName in ['q1', 'q2', 'q3', 'q4']:
            q = sqs.Queue(self, queueName)
            dispatcher.add_subscription(
                subscriptions.SqsSubscription(
                    q,
                    raw_message_delivery=True,
                ))

            core.CfnOutput(self,
                           f'{queueName}Url',
                           value=q.queue_url,
                           export_name=f'{queueName}Url')
Example 14
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        queue = sqs.Queue(
            self, "SlsApiQueue",
            visibility_timeout=core.Duration.seconds(300),
        )

        topic = sns.Topic(
            self, "SlsApiTopic"
        )

        topic.add_subscription(subs.SqsSubscription(queue))

        ## S1. Define the table that maps short codes to URLs.
        table = aws_dynamodb.Table(self, "mapping-table",
                partition_key=aws_dynamodb.Attribute(
                    name="id",
                    type=aws_dynamodb.AttributeType.STRING),
                read_capacity=10,
                write_capacity=5)

        ## S2.1. Defines Lambda resource & API-Gateway request handler
        ## All API requests will go to the same function.
        handler = aws_lambda.Function(self, "SlsApiFunction",
                            code=aws_lambda.Code.asset("./lambda"),
                            handler="handler.main",
                            timeout=core.Duration.minutes(5),
                            runtime=aws_lambda.Runtime.PYTHON_3_7)

        ## S2.2. Pass the table name to the handler through an environment variable
        ## and grant the handler read/write permissions on the table.
        table.grant_read_write_data(handler)
        handler.add_environment('TABLE_NAME', table.table_name)

        ## S3. Define the API endpoint and associate the handler
        api = aws_apigateway.LambdaRestApi(self, "SlsApiGateway",
                                           handler=handler)
Example 15
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        queue = sqs.Queue(self,
                          "StartProwlerScan",
                          receive_message_wait_time=core.Duration.seconds(20),
                          visibility_timeout=core.Duration.seconds(7200))
        push_all_active_accounts_onto_queue_lambda_function = lambda_.Function(
            self,
            "PushAllActiveAccountsOntoQueue",
            runtime=lambda_.Runtime.PYTHON_3_8,
            code=lambda_.Code.asset("lambda/pushAllActiveActivesOntoQueue"),
            handler="lambda_function.lambda_handler",
            environment={"SQS_QUEUE_URL": queue.queue_url})
        event_lambda_target = events_targets.LambdaFunction(
            handler=push_all_active_accounts_onto_queue_lambda_function)
        queue.grant_send_messages(
            push_all_active_accounts_onto_queue_lambda_function)
        schedule = events.Schedule.rate(core.Duration.days(1))
        events.Rule(self,
                    "DailyTrigger",
                    schedule=schedule,
                    targets=[event_lambda_target])

        vpc = ec2.Vpc(self, "Vpc")
        cluster = ecs.Cluster(self, "Cluster", vpc=vpc)
        logging = ecs.AwsLogDriver(stream_prefix="ProwlerTask",
                                   log_retention=logs.RetentionDays.ONE_DAY)
        results_bucket = s3.Bucket(self, "ResultsBucket")
        dockerfile_directory = path.join(path.dirname(path.realpath(__file__)),
                                         "docker")
        image = ecr_assets.DockerImageAsset(self,
                                            "ProwlerImageBuild",
                                            directory=dockerfile_directory)
        prowler_task = ecs.FargateTaskDefinition(self,
                                                 "ProwlerTaskDefinition",
                                                 cpu=256,
                                                 memory_limit_mib=512)
        prowler_task.add_container(
            "Prowler_image",
            image=ecs.ContainerImage.from_docker_image_asset(image),
            logging=logging,
            environment={
                "RESULTS_BUCKET": results_bucket.bucket_name,
                "SQS_QUEUE_URL": queue.queue_url
            })
        task_role = prowler_task.task_role
        task_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name("ReadOnlyAccess"))
        queue.grant(task_role, "sqs:DeleteMessage")
        results_bucket.grant_put(task_role)
        task_role.attach_inline_policy(
            iam.Policy(self,
                       "AssumeRolePermissions",
                       statements=[
                           iam.PolicyStatement(actions=["sts:AssumeRole"],
                                               effect=iam.Effect.ALLOW,
                                               resources=["*"])
                       ]))
        run_fargate_task_lambda_function = lambda_.Function(
            self,
            "RunFargateTask",
            runtime=lambda_.Runtime.PYTHON_3_8,
            code=lambda_.Code.asset("lambda/runFargateTask"),
            handler="lambda_function.lambda_handler",
            environment={
                "CLUSTER_ARN":
                cluster.cluster_arn,
                "SUBNET_IDS":
                json.dumps(
                    [subnet.subnet_id for subnet in vpc.private_subnets]),
                "QUEUE_URL":
                queue.queue_url,
                "TASK_DEFINITION_ARN":
                prowler_task.task_definition_arn
            })
        queue.grant(run_fargate_task_lambda_function, "sqs:GetQueueAttributes")
        sqs_alarm_topic = sns.Topic(self, "SqsAlarmTopic")
        sqs_alarm_topic.grant_publish(run_fargate_task_lambda_function)
        sqs_alarm_queue = sqs.Queue(
            self,
            "SqsAlarmQueue",
            retention_period=core.Duration.days(14),
            visibility_timeout=core.Duration.minutes(3))
        sqs_alarm_topic.add_subscription(
            sns_subscriptions.SqsSubscription(sqs_alarm_queue))
        run_fargate_task_lambda_function.add_event_source(
            lambda_event_sources.SqsEventSource(sqs_alarm_queue))
        run_fargate_task_lambda_function.add_to_role_policy(
            iam.PolicyStatement(actions=["ecs:RunTask"],
                                effect=iam.Effect.ALLOW,
                                resources=[prowler_task.task_definition_arn]))
        run_fargate_task_lambda_function.add_to_role_policy(
            iam.PolicyStatement(actions=["iam:PassRole"],
                                effect=iam.Effect.ALLOW,
                                resources=[
                                    prowler_task.execution_role.role_arn,
                                    prowler_task.task_role.role_arn
                                ]))
        sqs_ok_topic = sns.Topic(self, "SqsOkTopic")
        clear_alarm_queue = lambda_.Function(
            self,
            "ClearAlarmQueue",
            runtime=lambda_.Runtime.PYTHON_3_8,
            code=lambda_.Code.asset("lambda/clearAlarmQueue"),
            handler="lambda_function.lambda_handler",
            environment={"QUEUE_URL": sqs_alarm_queue.queue_url})
        clear_alarm_queue.add_event_source(
            lambda_event_sources.SnsEventSource(sqs_ok_topic))
        sqs_alarm_queue.grant(clear_alarm_queue, "sqs:DeleteMessage")

        alarm = cloudwatch.Alarm(
            self,
            "FargateTaskTrigger",
            metric=queue.metric_approximate_number_of_messages_visible(
                period=core.Duration.seconds(60), statistic="max"),
            evaluation_periods=1,
            threshold=1,
            alarm_description="Run a fargate task when there "
            "are messages in the queue",
            treat_missing_data=cloudwatch.TreatMissingData.IGNORE)
        alarm.add_alarm_action(cloudwatch_actions.SnsAction(sqs_alarm_topic))
        alarm.add_ok_action(cloudwatch_actions.SnsAction(sqs_ok_topic))
Example 16
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        handler: str,
        memory: int = 3008,
        timeout: int = 900,
        concurrent: int = 10,
        retry: int = 0,
        permissions: Optional[List[iam.PolicyStatement]] = None,
        environment: Dict = {},
        code_dir: str = "./",
        **kwargs: Any,
    ) -> None:
        """Create AWS Lambda watchbot stack. """
        super().__init__(scope, id, **kwargs)

        permissions = permissions or []

        topic = sns.Topic(self,
                          "lambdaTopic",
                          display_name="Lambda Watchbot SNS Topic")
        core.CfnOutput(
            self,
            "SNSTopic",
            value=topic.topic_arn,
            description="SNS Topic ARN",
            export_name="SNSTopic",
        )

        dlqueue = sqs.Queue(self, "lambdaDeadLetterQueue")
        queue = sqs.Queue(
            self,
            "lambdaQueue",
            visibility_timeout=core.Duration.seconds(timeout),
            dead_letter_queue=sqs.DeadLetterQueue(queue=dlqueue,
                                                  max_receive_count=3),
        )
        core.CfnOutput(
            self,
            "SQSQueueURL",
            value=queue.queue_url,
            description="SQS URL",
            export_name="SQSQueueURL",
        )

        topic.add_subscription(sns_sub.SqsSubscription(queue))

        asset = aws_lambda.AssetImageCode(directory="./")

        worker = aws_lambda.Function(
            self,
            f"{id}-lambda",
            description="Watchbot's worker",
            code=asset,
            handler=aws_lambda.Handler.FROM_IMAGE,
            runtime=aws_lambda.Runtime.FROM_IMAGE,
            memory_size=memory,
            reserved_concurrent_executions=concurrent,
            timeout=core.Duration.seconds(timeout),
            retry_attempts=retry,
            environment=environment,
        )

        for perm in permissions:
            worker.add_to_role_policy(perm)

        worker.add_event_source(
            aws_lambda_event_sources.SqsEventSource(queue, batch_size=1))
        topic.grant_publish(worker)
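A hypothetical way to instantiate the stack above (the class name WatchbotStack, the handler value, and the bucket ARN are assumptions, not taken from the original; the core and iam aliases come from the listing's imports):

app = core.App()
WatchbotStack(
    app,
    "watchbot",
    handler="handler.main",
    timeout=300,
    permissions=[
        iam.PolicyStatement(
            actions=["s3:GetObject"],
            resources=["arn:aws:s3:::my-example-bucket/*"],
        )
    ],
    environment={"LOG_LEVEL": "INFO"},
)
app.synth()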
Example 17
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 stack_log_level: str, store_events_topic, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Add your stack resources below

        # Sales Queue Consumer
        sales_q = _sqs.Queue(
            self,
            "salesEventsQueue",
            delivery_delay=cdk.Duration.seconds(5),
            queue_name=f"sales_q",
            retention_period=cdk.Duration.days(2),
            visibility_timeout=cdk.Duration.seconds(10),
            receive_message_wait_time=cdk.Duration.seconds(10))

        # Create a Filter for Sales Subscription
        sales_policy = {
            "evnt_type": _sns.SubscriptionFilter(conditions=["sales-event"])
        }

        # Create an SQS type subscription to SNS
        sales_subs = _sns_subs.SqsSubscription(sales_q,
                                               filter_policy=sales_policy)

        # Add the subscription to the topic
        store_events_topic.add_subscription(sales_subs)

        # Read Lambda Code
        try:
            with open(
                    "stacks/back_end/serverless_sns_consumer_stack/lambda_src/sqs_data_consumer.py",
                    encoding="utf-8",
                    mode="r") as f:
                msg_consumer_fn_code = f.read()
        except OSError:
            print("Unable to read Lambda Function Code")
            raise
        msg_consumer_fn = _lambda.Function(
            self,
            "msgConsumerFn",
            function_name=f"sales_queue_consumer_fn",
            description="Process messages in SQS queue",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.InlineCode(msg_consumer_fn_code),
            handler="index.lambda_handler",
            timeout=cdk.Duration.seconds(5),
            reserved_concurrent_executions=1,
            environment={
                "LOG_LEVEL": f"{stack_log_level}",
                "APP_ENV": "Production",
                "SALES_QUEUE_NAME": f"{sales_q.queue_name}",
                "TRIGGER_RANDOM_DELAY": "True"
            })

        msg_consumer_fn_version = msg_consumer_fn.latest_version
        msg_consumer_fn_version_alias = _lambda.Alias(
            self,
            "msgConsumerFnAlias",
            alias_name="MystiqueAutomation",
            version=msg_consumer_fn_version)

        # Create a custom log group for the consumer function
        msg_consumer_fn_lg = _logs.LogGroup(
            self,
            "msgConsumerFnLogGroup",
            log_group_name=f"/aws/lambda/{msg_consumer_fn.function_name}",
            removal_policy=cdk.RemovalPolicy.DESTROY,
            retention=_logs.RetentionDays.ONE_DAY)

        # Restrict the consumer Lambda to be invoked only from the stack owner's account
        msg_consumer_fn.add_permission("restrictLambdaInvocationToOwnAccount",
                                       principal=_iam.AccountRootPrincipal(),
                                       action="lambda:InvokeFunction",
                                       source_account=cdk.Aws.ACCOUNT_ID,
                                       source_arn=sales_q.queue_arn)

        # Set our Lambda Function to be invoked by SQS
        msg_consumer_fn.add_event_source(_sqsEventSource(sales_q,
                                                         batch_size=5))

        ###########################################
        ################# OUTPUTS #################
        ###########################################
        output_0 = cdk.CfnOutput(
            self,
            "AutomationFrom",
            value=f"{GlobalArgs.SOURCE_INFO}",
            description=
            "To know more about this automation stack, check out our github page."
        )

        output_2 = cdk.CfnOutput(
            self,
            "SalesEventsConsumer",
            value=
            f"https://console.aws.amazon.com/lambda/home?region={cdk.Aws.REGION}#/functions/{msg_consumer_fn.function_name}",
            description="Process events received from SQS event bus")
Example 18
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here

        ###########################################################################
        # AWS SECRETS MANAGER - Templated secret
        ###########################################################################
        # templated_secret = aws_secretsmanager.Secret(self, "TemplatedSecret",
        #     generate_secret_string=aws_secretsmanager.SecretStringGenerator(
        #         secret_string_template= "{\"username\":\"cleanbox\"}",
        #         generate_string_key="password"
        #     )
        # )
        ###########################################################################
        # CUSTOM CLOUDFORMATION RESOURCE
        ###########################################################################
        # customlambda = aws_lambda.Function(self,'customconfig',
        # handler='customconfig.on_event',
        # runtime=aws_lambda.Runtime.PYTHON_3_7,
        # code=aws_lambda.Code.asset('customconfig'),
        # )

        # customlambda_statement = aws_iam.PolicyStatement(actions=["events:PutRule"], conditions=None, effect=None, not_actions=None, not_principals=None, not_resources=None, principals=None, resources=["*"], sid=None)
        # customlambda.add_to_role_policy(statement=customlambda_statement)

        # my_provider = cr.Provider(self, "MyProvider",
        #     on_event_handler=customlambda,
        #     # is_complete_handler=is_complete, # optional async "waiter"
        #     log_retention=logs.RetentionDays.SIX_MONTHS
        # )

        # CustomResource(self, 'customconfigresource', service_token=my_provider.service_token)

        ###########################################################################
        # AWS LAMBDA FUNCTIONS
        ###########################################################################
        sqs_to_elastic_cloud = aws_lambda.Function(
            self,
            'sqs_to_elastic_cloud',
            handler='sqs_to_elastic_cloud.lambda_handler',
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=aws_lambda.Code.asset('sqs_to_elastic_cloud'),
            memory_size=4096,
            timeout=core.Duration.seconds(300),
            log_retention=logs.RetentionDays.ONE_DAY)

        sqs_to_elasticsearch_service = aws_lambda.Function(
            self,
            'sqs_to_elasticsearch_service',
            handler='sqs_to_elasticsearch_service.lambda_handler',
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=aws_lambda.Code.asset('sqs_to_elasticsearch_service'),
            memory_size=4096,
            timeout=core.Duration.seconds(300),
            log_retention=logs.RetentionDays.ONE_DAY)

        # sqs_to_elasticsearch_service.add_environment("kinesis_firehose_name", "-")
        # sqs_to_elastic_cloud.add_environment("index_name", "-")

        ###########################################################################
        # AWS LAMBDA FUNCTIONS
        ###########################################################################
        # sqs_to_elasticsearch_service_permission = aws_lambda.Permission(*, principal, action=None, event_source_token=None, scope=None, source_account=None, source_arn=None)

        ###########################################################################
        # AMAZON S3 BUCKETS
        ###########################################################################
        access_log_bucket = aws_s3.Bucket(self, "access_log_bucket")
        kinesis_log_bucket = aws_s3.Bucket(self, "kinesis_log_bucket")

        ###########################################################################
        # LAMBDA SUPPLEMENTAL POLICIES
        ###########################################################################
        lambda_supplemental_policy_statement = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=["s3:Get*", "s3:Head*", "s3:List*", "firehose:*"],
            resources=["*"])

        sqs_to_elastic_cloud.add_to_role_policy(
            lambda_supplemental_policy_statement)
        sqs_to_elasticsearch_service.add_to_role_policy(
            lambda_supplemental_policy_statement)
        ###########################################################################
        # AWS SNS TOPICS
        ###########################################################################
        access_log_topic = aws_sns.Topic(self, "access_log_topic")

        ###########################################################################
        # ADD AMAZON S3 BUCKET NOTIFICATIONS
        ###########################################################################
        access_log_bucket.add_event_notification(
            aws_s3.EventType.OBJECT_CREATED,
            aws_s3_notifications.SnsDestination(access_log_topic))

        ###########################################################################
        # AWS SQS QUEUES
        ###########################################################################
        sqs_to_elasticsearch_service_queue_iqueue = aws_sqs.Queue(
            self, "sqs_to_elasticsearch_service_queue_dlq")
        sqs_to_elasticsearch_service_queue_dlq = aws_sqs.DeadLetterQueue(
            max_receive_count=10,
            queue=sqs_to_elasticsearch_service_queue_iqueue)
        sqs_to_elasticsearch_service_queue = aws_sqs.Queue(
            self,
            "sqs_to_elasticsearch_service_queue",
            visibility_timeout=core.Duration.seconds(301),
            dead_letter_queue=sqs_to_elasticsearch_service_queue_dlq)

        sqs_to_elastic_cloud_queue_iqueue = aws_sqs.Queue(
            self, "sqs_to_elastic_cloud_queue_dlq")
        sqs_to_elastic_cloud_queue_dlq = aws_sqs.DeadLetterQueue(
            max_receive_count=10, queue=sqs_to_elastic_cloud_queue_iqueue)
        sqs_to_elastic_cloud_queue = aws_sqs.Queue(
            self,
            "sqs_to_elastic_cloud_queue",
            visibility_timeout=core.Duration.seconds(301),
            dead_letter_queue=sqs_to_elastic_cloud_queue_dlq)

        ###########################################################################
        # AWS SNS TOPIC SUBSCRIPTIONS
        ###########################################################################
        access_log_topic.add_subscription(
            aws_sns_subscriptions.SqsSubscription(sqs_to_elastic_cloud_queue))
        access_log_topic.add_subscription(
            aws_sns_subscriptions.SqsSubscription(
                sqs_to_elasticsearch_service_queue))

        ###########################################################################
        # AWS LAMBDA SQS EVENT SOURCE
        ###########################################################################
        sqs_to_elastic_cloud.add_event_source(
            SqsEventSource(sqs_to_elastic_cloud_queue, batch_size=10))
        sqs_to_elasticsearch_service.add_event_source(
            SqsEventSource(sqs_to_elasticsearch_service_queue, batch_size=10))

        ###########################################################################
        # AWS ELASTICSEARCH DOMAIN
        ###########################################################################

        ###########################################################################
        # AWS ELASTICSEARCH DOMAIN ACCESS POLICY
        ###########################################################################
        this_aws_account = aws_iam.AccountPrincipal(account_id="012345678912")
        # s3_to_elasticsearch_access_logs_domain_access_policy_statement = aws_iam.PolicyStatement(
        #     principals=[this_aws_account],
        #     effect=aws_iam.Effect.ALLOW,
        #     actions=["es:*"],
        #     resources=["*"]
        #     )
        # s3_to_elasticsearch_access_logs_domain_access_policy_statement_list=[]
        # s3_to_elasticsearch_access_logs_domain_access_policy_statement_list.append(s3_to_elasticsearch_access_logs_domain_access_policy_statement)

        s3_to_elasticsearch_access_logs_domain = aws_elasticsearch.Domain(
            self,
            "s3-to-elasticsearch-access-logs-domain",
            # access_policies=s3_to_elasticsearch_access_logs_domain_access_policy_statement_list,
            version=aws_elasticsearch.ElasticsearchVersion.V7_1,
            capacity={
                "master_nodes": 3,
                "data_nodes": 4
            },
            ebs={"volume_size": 100},
            zone_awareness={"availability_zone_count": 2},
            logging={
                "slow_search_log_enabled": True,
                "app_log_enabled": True,
                "slow_index_log_enabled": True
            })

        ###########################################################################
        # AMAZON COGNITO USER POOL
        ###########################################################################
        s3_to_elasticsearch_user_pool = aws_cognito.UserPool(
            self,
            "s3-to-elasticsearch-access-logs-pool",
            account_recovery=None,
            auto_verify=None,
            custom_attributes=None,
            email_settings=None,
            enable_sms_role=None,
            lambda_triggers=None,
            mfa=None,
            mfa_second_factor=None,
            password_policy=None,
            self_sign_up_enabled=None,
            sign_in_aliases=aws_cognito.SignInAliases(email=True,
                                                      phone=None,
                                                      preferred_username=None,
                                                      username=True),
            sign_in_case_sensitive=None,
            sms_role=None,
            sms_role_external_id=None,
            standard_attributes=None,
            user_invitation=None,
            user_pool_name=None,
            user_verification=None)

        ###########################################################################
        # AMAZON KINESIS FIREHOSE STREAM
        ###########################################################################
        # kinesis_policy_statement = aws_iam.PolicyStatement(
        #     effect=aws_iam.Effect.ALLOW,
        #     # actions=["es:*", "s3:*", "kms:*", "kinesis:*", "lambda:*"],
        #     actions=["*"],
        #     resources=["*"]
        #     )

        # kinesis_policy_document = aws_iam.PolicyDocument()
        # kinesis_policy_document.add_statements(kinesis_policy_statement)

        kinesis_firehose_stream_role = aws_iam.Role(
            self,
            "BaseVPCIAMLogRole",
            assumed_by=aws_iam.ServicePrincipal('firehose.amazonaws.com'),
            role_name=None,
            inline_policies={
                "AllowLogAccess":
                aws_iam.PolicyDocument(
                    assign_sids=False,
                    statements=[
                        aws_iam.PolicyStatement(actions=[
                            '*', 'es:*', 'logs:PutLogEvents',
                            'logs:DescribeLogGroups',
                            'logs:DescribeLogStreams'
                        ],
                                                effect=aws_iam.Effect.ALLOW,
                                                resources=['*'])
                    ])
            })

        RetryOptions = aws_kinesisfirehose.CfnDeliveryStream.ElasticsearchRetryOptionsProperty(
            duration_in_seconds=300)
        s3_configuration = aws_kinesisfirehose.CfnDeliveryStream.S3DestinationConfigurationProperty(
            bucket_arn=kinesis_log_bucket.bucket_arn,
            role_arn=kinesis_firehose_stream_role.role_arn)

        ElasticsearchDestinationConfiguration = aws_kinesisfirehose.CfnDeliveryStream.ElasticsearchDestinationConfigurationProperty(
            # "BufferingHints" : ElasticsearchBufferingHints,
            # "CloudWatchLoggingOptions" : CloudWatchLoggingOptions,
            # "ClusterEndpoint" : String,
            domain_arn=s3_to_elasticsearch_access_logs_domain.domain_arn,
            index_name="s3-to-elasticsearch-accesslogs",
            index_rotation_period="OneDay",
            # "ProcessingConfiguration" : ProcessingConfiguration,
            retry_options=RetryOptions,
            role_arn=kinesis_firehose_stream_role.role_arn,
            # "S3BackupMode" : String,
            s3_configuration=s3_configuration
            # "TypeName" : String
            # "VpcConfiguration" : VpcConfiguration
        )

        kinesis_firehose_stream = aws_kinesisfirehose.CfnDeliveryStream(
            self,
            "kinesis_firehose_stream",
            delivery_stream_encryption_configuration_input=None,
            delivery_stream_name=None,
            delivery_stream_type=None,
            elasticsearch_destination_configuration=
            ElasticsearchDestinationConfiguration,
            extended_s3_destination_configuration=None,
            http_endpoint_destination_configuration=None,
            kinesis_stream_source_configuration=None,
            redshift_destination_configuration=None,
            s3_destination_configuration=None,
            splunk_destination_configuration=None,
            tags=None)

        sqs_to_elasticsearch_service.add_environment(
            "FIREHOSE_NAME", kinesis_firehose_stream.ref)
        sqs_to_elasticsearch_service.add_environment(
            "QUEUEURL", sqs_to_elasticsearch_service_queue.queue_url)
        sqs_to_elasticsearch_service.add_environment("DEBUG", "False")

        sqs_to_elastic_cloud.add_environment("ELASTICCLOUD_SECRET_NAME", "-")
        sqs_to_elastic_cloud.add_environment("ELASTIC_CLOUD_ID", "-")
        sqs_to_elastic_cloud.add_environment("ELASTIC_CLOUD_PASSWORD", "-")
        sqs_to_elastic_cloud.add_environment("ELASTIC_CLOUD_USERNAME", "-")
        sqs_to_elastic_cloud.add_environment(
            "QUEUEURL", sqs_to_elastic_cloud_queue.queue_url)
        sqs_to_elastic_cloud.add_environment("DEBUG", "False")
Example 19
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here

        ###########################################################################
        # AWS SECRETS MANAGER - Templated secret
        ###########################################################################
        # templated_secret = aws_secretsmanager.Secret(self, "TemplatedSecret",
        #     generate_secret_string=aws_secretsmanager.SecretStringGenerator(
        #         secret_string_template= "{\"username\":\"cleanbox\"}",
        #         generate_string_key="password"
        #     )
        # )
        ###########################################################################
        # CUSTOM CLOUDFORMATION RESOURCE
        ###########################################################################
        # customlambda = aws_lambda.Function(self,'customconfig',
        # handler='customconfig.on_event',
        # runtime=aws_lambda.Runtime.PYTHON_3_7,
        # code=aws_lambda.Code.asset('customconfig'),
        # )

        # customlambda_statement = aws_iam.PolicyStatement(actions=["events:PutRule"], conditions=None, effect=None, not_actions=None, not_principals=None, not_resources=None, principals=None, resources=["*"], sid=None)
        # customlambda.add_to_role_policy(statement=customlambda_statement)

        # my_provider = cr.Provider(self, "MyProvider",
        #     on_event_handler=customlambda,
        #     # is_complete_handler=is_complete, # optional async "waiter"
        #     log_retention=logs.RetentionDays.SIX_MONTHS
        # )

        # CustomResource(self, 'customconfigresource', service_token=my_provider.service_token)

        ###########################################################################
        # AWS LAMBDA FUNCTIONS
        ###########################################################################
        sqs_to_elastic_cloud = aws_lambda.Function(
            self,
            'sqs_to_elastic_cloud',
            handler='sqs_to_elastic_cloud.lambda_handler',
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=aws_lambda.Code.asset('sqs_to_elastic_cloud'),
            memory_size=4096,
            timeout=core.Duration.seconds(301),
            log_retention=logs.RetentionDays.ONE_DAY)

        sqs_to_elasticsearch_service = aws_lambda.Function(
            self,
            'sqs_to_elasticsearch_service',
            handler='sqs_to_elasticsearch_service.lambda_handler',
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=aws_lambda.Code.asset('sqs_to_elasticsearch_service'),
            memory_size=4096,
            timeout=core.Duration.seconds(301),
            log_retention=logs.RetentionDays.ONE_DAY)
        ###########################################################################
        # AWS LAMBDA FUNCTIONS
        ###########################################################################

        ###########################################################################
        # AMAZON S3 BUCKETS
        ###########################################################################
        cloudtrail_log_bucket = aws_s3.Bucket(self, "cloudtrail_log_bucket")

        ###########################################################################
        # LAMBDA SUPPLEMENTAL POLICIES
        ###########################################################################
        lambda_supplemental_policy_statement = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=["s3:Get*", "s3:Head*", "s3:List*", "firehose:*", "es:*"],
            resources=["*"])

        sqs_to_elastic_cloud.add_to_role_policy(
            lambda_supplemental_policy_statement)
        sqs_to_elasticsearch_service.add_to_role_policy(
            lambda_supplemental_policy_statement)
        ###########################################################################
        # AWS SNS TOPICS
        ###########################################################################
        cloudtrail_log_topic = aws_sns.Topic(self, "cloudtrail_log_topic")

        ###########################################################################
        # ADD AMAZON S3 BUCKET NOTIFICATIONS
        ###########################################################################
        cloudtrail_log_bucket.add_event_notification(
            aws_s3.EventType.OBJECT_CREATED,
            aws_s3_notifications.SnsDestination(cloudtrail_log_topic))

        ###########################################################################
        # AWS SQS QUEUES
        ###########################################################################
        sqs_to_elasticsearch_service_queue_iqueue = aws_sqs.Queue(
            self, "sqs_to_elasticsearch_service_queue_dlq")
        sqs_to_elasticsearch_service_queue_dlq = aws_sqs.DeadLetterQueue(
            max_receive_count=10,
            queue=sqs_to_elasticsearch_service_queue_iqueue)
        sqs_to_elasticsearch_service_queue = aws_sqs.Queue(
            self,
            "sqs_to_elasticsearch_service_queue",
            visibility_timeout=core.Duration.seconds(300),
            dead_letter_queue=sqs_to_elasticsearch_service_queue_dlq)

        sqs_to_elastic_cloud_queue_iqueue = aws_sqs.Queue(
            self, "sqs_to_elastic_cloud_queue_dlq")
        sqs_to_elastic_cloud_queue_dlq = aws_sqs.DeadLetterQueue(
            max_receive_count=10, queue=sqs_to_elastic_cloud_queue_iqueue)
        sqs_to_elastic_cloud_queue = aws_sqs.Queue(
            self,
            "sqs_to_elastic_cloud_queue",
            visibility_timeout=core.Duration.seconds(300),
            dead_letter_queue=sqs_to_elastic_cloud_queue_dlq)

        ###########################################################################
        # AWS SNS TOPIC SUBSCRIPTIONS
        ###########################################################################
        cloudtrail_log_topic.add_subscription(
            aws_sns_subscriptions.SqsSubscription(sqs_to_elastic_cloud_queue))
        cloudtrail_log_topic.add_subscription(
            aws_sns_subscriptions.SqsSubscription(
                sqs_to_elasticsearch_service_queue))

        ###########################################################################
        # AWS LAMBDA SQS EVENT SOURCE
        ###########################################################################
        sqs_to_elastic_cloud.add_event_source(
            SqsEventSource(sqs_to_elastic_cloud_queue, batch_size=10))
        sqs_to_elasticsearch_service.add_event_source(
            SqsEventSource(sqs_to_elasticsearch_service_queue, batch_size=10))

        ###########################################################################
        # AWS ELASTICSEARCH DOMAIN
        ###########################################################################

        ###########################################################################
        # AWS ELASTICSEARCH DOMAIN ACCESS POLICY
        ###########################################################################
        this_aws_account = aws_iam.AccountPrincipal(account_id="012345678912")

        s3_to_elasticsearch_cloudtrail_logs_domain = aws_elasticsearch.Domain(
            self,
            "s3-to-elasticsearch-cloudtrail-logs-domain",
            version=aws_elasticsearch.ElasticsearchVersion.V7_1,
            capacity={
                "master_nodes": 3,
                "data_nodes": 4
            },
            ebs={"volume_size": 100},
            zone_awareness={"availability_zone_count": 2},
            logging={
                "slow_search_log_enabled": True,
                "app_log_enabled": True,
                "slow_index_log_enabled": True
            })

        ###########################################################################
        # AMAZON COGNITO USER POOL
        ###########################################################################
        s3_to_elasticsearch_user_pool = aws_cognito.UserPool(
            self,
            "s3-to-elasticsearch-cloudtrial-logs-pool",
            account_recovery=None,
            auto_verify=None,
            custom_attributes=None,
            email_settings=None,
            enable_sms_role=None,
            lambda_triggers=None,
            mfa=None,
            mfa_second_factor=None,
            password_policy=None,
            self_sign_up_enabled=None,
            sign_in_aliases=aws_cognito.SignInAliases(email=True,
                                                      phone=None,
                                                      preferred_username=None,
                                                      username=True),
            sign_in_case_sensitive=None,
            sms_role=None,
            sms_role_external_id=None,
            standard_attributes=None,
            user_invitation=None,
            user_pool_name=None,
            user_verification=None)

        sqs_to_elasticsearch_service.add_environment(
            "ELASTICSEARCH_HOST",
            s3_to_elasticsearch_cloudtrail_logs_domain.domain_endpoint)
        sqs_to_elasticsearch_service.add_environment(
            "QUEUEURL", sqs_to_elasticsearch_service_queue.queue_url)
        sqs_to_elasticsearch_service.add_environment("DEBUG", "False")

        sqs_to_elastic_cloud.add_environment("ELASTICCLOUD_SECRET_NAME", "-")
        sqs_to_elastic_cloud.add_environment("ELASTIC_CLOUD_ID", "-")
        sqs_to_elastic_cloud.add_environment("ELASTIC_CLOUD_PASSWORD", "-")
        sqs_to_elastic_cloud.add_environment("ELASTIC_CLOUD_USERNAME", "-")
        sqs_to_elastic_cloud.add_environment(
            "QUEUEURL", sqs_to_elastic_cloud_queue.queue_url)
        sqs_to_elastic_cloud.add_environment("DEBUG", "False")

        ###########################################################################
        # AWS COGNITO USER POOL
        ###########################################################################
        allevents_trail = aws_cloudtrail.Trail(
            self,
            "allevents_trail",
            bucket=cloudtrail_log_bucket,
            cloud_watch_log_group=None,
            cloud_watch_logs_retention=None,
            enable_file_validation=None,
            encryption_key=None,
            include_global_service_events=None,
            is_multi_region_trail=True,
            kms_key=None,
            management_events=aws_cloudtrail.ReadWriteType("ALL"),
            s3_key_prefix=None,
            send_to_cloud_watch_logs=False,
            sns_topic=None,
            trail_name=None)
Example 20
    def __init__(self, scope: core.Construct, id: str,
                 infra: RtspBaseResourcesConstruct, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create the inventory bucket...
        self.inventories = s3.Bucket(
            self,
            'InventoryBucket',
            bucket_name='homenet-{}.rtsp-inventories.{}.virtual.world'.format(
                infra.landing_zone.zone_name,
                core.Stack.of(self).region).lower(),
            removal_policy=core.RemovalPolicy.DESTROY,
            cors=[
                s3.CorsRule(allowed_methods=[s3.HttpMethods.GET],
                            allowed_origins=['*'])
            ],
            lifecycle_rules=[
                s3.LifecycleRule(
                    id='Transition-to-IA-after-30D',
                    prefix='eufy/',
                    abort_incomplete_multipart_upload_after=core.Duration.days(
                        7),
                    transitions=[
                        s3.Transition(
                            storage_class=s3.StorageClass.INFREQUENT_ACCESS,
                            transition_after=core.Duration.days(30))
                    ])
            ])

        # Create inventory collections for the Eufy Homebases...
        infra.bucket.add_inventory(
            objects_prefix='eufy/',
            inventory_id='{}-InventoryReport'.format('EufyFull'),
            format=s3.InventoryFormat.CSV,
            frequency=s3.InventoryFrequency.DAILY,
            include_object_versions=s3.InventoryObjectVersion.CURRENT,
            destination=s3.InventoryDestination(
                bucket=self.inventories,
                bucket_owner=core.Aws.ACCOUNT_ID,
                prefix=None))

        for base_name in ['Moonbase', 'Starbase']:
            prefix = 'eufy/{}.cameras.real.world/'.format(base_name).lower()
            infra.bucket.add_inventory(
                objects_prefix=prefix,
                inventory_id='{}-InventoryReport'.format(base_name),
                format=s3.InventoryFormat.CSV,
                frequency=s3.InventoryFrequency.DAILY,
                include_object_versions=s3.InventoryObjectVersion.CURRENT,
                destination=s3.InventoryDestination(
                    bucket=self.inventories,
                    bucket_owner=core.Aws.ACCOUNT_ID,
                    prefix=None))

        # Broadcast inventory creation events...
        self.inventoryAvailable = sns.Topic(
            self,
            'InventoryAvailable',
            display_name='HomeNet-{}-Rtsp-InventoryAvailable'.format(
                infra.landing_zone.zone_name),
            topic_name='HomeNet-{}-Rtsp-InventoryAvailable'.format(
                infra.landing_zone.zone_name))

        self.inventories.add_event_notification(
            s3.EventType.OBJECT_CREATED,
            s3n.SnsDestination(topic=self.inventoryAvailable),
            s3.NotificationKeyFilter(suffix='manifest.json'))

        # Attach debug queue to the notification
        self.inventoryAvailable.add_subscription(
            subs.SqsSubscription(
                raw_message_delivery=True,
                queue=sqs.Queue(
                    self,
                    'InventoryDebugQueue',
                    removal_policy=core.RemovalPolicy.DESTROY,
                    retention_period=core.Duration.days(7),
                    queue_name='HomeNet-{}-RtspInventoryAvailable_Debug'.
                    format(infra.landing_zone.zone_name))))

        # Subscribe the GroundTruth Manifest Generator
        groundtruth = RtspGroundTruthManifestGenerationFunction(
            self,
            'GroundTruthManifest',
            infra=infra,
            topic=self.inventoryAvailable)

        self.inventories.grant_read_write(groundtruth.function.role)

        # Create the RtspNormalizeImage S3 Object Lambda
        RtspNormalizeImageAccessPoint(scope=self,
                                      id='NormalizedImage',
                                      infra=infra)
Example 21
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id)

        lambda_dir = kwargs["lambda_dir"]

        # Note: A dead-letter queue is optional but it helps capture any failed messages
        dlq = sqs.Queue(self,
                        id="dead_letter_queue_id",
                        retention_period=Duration.days(7))
        dead_letter_queue = sqs.DeadLetterQueue(max_receive_count=1, queue=dlq)

        upload_queue = sqs.Queue(self,
                                 id="sample_queue_id",
                                 visibility_timeout=Duration.seconds(30),
                                 dead_letter_queue=dead_letter_queue)

        sqs_subscription = sns_subs.SqsSubscription(upload_queue,
                                                    raw_message_delivery=True)

        upload_event_topic = sns.Topic(self, id="sample_sns_topic_id")

        # This binds the SNS Topic to the SQS Queue
        upload_event_topic.add_subscription(sqs_subscription)

        # Note: Lifecycle Rules are optional but are included here to keep costs
        #       low by cleaning up old files or moving them to lower cost storage options
        s3_bucket = s3.Bucket(
            self,
            id="sample_bucket_id",
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
            versioned=True,
            lifecycle_rules=[
                s3.LifecycleRule(
                    enabled=True,
                    expiration=Duration.days(365),
                    transitions=[
                        s3.Transition(
                            storage_class=s3.StorageClass.INFREQUENT_ACCESS,
                            transition_after=Duration.days(30)),
                        s3.Transition(storage_class=s3.StorageClass.GLACIER,
                                      transition_after=Duration.days(90)),
                    ])
            ])

        # Note: If you don't specify a filter all uploads will trigger an event.
        #       Also, modifying the event type will handle other object operations
        # This binds the S3 bucket to the SNS Topic
        s3_bucket.add_event_notification(
            s3.EventType.OBJECT_CREATED_PUT,
            s3n.SnsDestination(upload_event_topic),
            s3.NotificationKeyFilter(prefix="uploads", suffix=".csv"))

        function = _lambda.Function(
            self,
            "lambda_function",
            runtime=_lambda.Runtime.PYTHON_3_9,
            handler="lambda_function.handler",
            code=_lambda.Code.from_asset(path=lambda_dir))

        # This binds the lambda to the SQS Queue
        invoke_event_source = lambda_events.SqsEventSource(upload_queue)
        function.add_event_source(invoke_event_source)

        # Examples of CloudFormation outputs
        CfnOutput(
            self,
            "UploadFileToS3Example",
            value="aws s3 cp <local-path-to-file> s3://{}/".format(
                s3_bucket.bucket_name),
            description=
            "Upload a file to S3 (using AWS CLI) to trigger the SQS chain",
        )
        CfnOutput(
            self,
            "UploadSqsQueueUrl",
            value=upload_queue.queue_url,
            description="Link to the SQS Queue triggered on S3 uploads",
        )
        CfnOutput(
            self,
            "LambdaFunctionName",
            value=function.function_name,
        )
        CfnOutput(
            self,
            "LambdaFunctionLogGroupName",
            value=function.log_group.log_group_name,
        )
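
        # A minimal sketch of the consumer code expected in lambda_dir; the file
        # and function names are assumptions taken from the handler string
        # "lambda_function.handler" above:
        #
        #   def handler(event, context):
        #       for record in event["Records"]:
        #           # With raw_message_delivery=True the body is the raw S3 event JSON
        #           print(record["body"])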
Example 22
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        entrypoint: Optional[List] = None,
        cpu: Union[int, float] = 256,
        memory: Union[int, float] = 512,
        mincount: int = 0,
        maxcount: int = 50,
        scaling_steps: int = 5,
        permissions: Optional[List[iam.PolicyStatement]] = None,
        vpc_id: Optional[str] = None,
        vpc_is_default: Optional[bool] = None,
        environment: dict = {},
        **kwargs: Any,
    ) -> None:
        """Define stack."""
        super().__init__(scope, id, **kwargs)

        permissions = permissions or []

        vpc = ec2.Vpc.from_lookup(self,
                                  "vpc",
                                  vpc_id=vpc_id,
                                  is_default=vpc_is_default)

        cluster = ecs.Cluster(self, f"{id}-cluster", vpc=vpc)

        topic = sns.Topic(self,
                          "ecsTopic",
                          display_name="ECS Watchbot SNS Topic")
        core.CfnOutput(
            self,
            "SNSTopic",
            value=topic.topic_arn,
            description="SNS Topic ARN",
            export_name=f"{id}-SNSTopic",
        )

        dlqueue = sqs.Queue(self, "ecsDeadLetterQueue")
        core.CfnOutput(
            self,
            "DeadSQSQueueURL",
            value=dlqueue.queue_url,
            description="DeadLetter SQS URL",
            export_name=f"{id}-DeadSQSQueueURL",
        )

        queue = sqs.Queue(
            self,
            "ecsQueue",
            dead_letter_queue=sqs.DeadLetterQueue(queue=dlqueue,
                                                  max_receive_count=3),
        )
        core.CfnOutput(
            self,
            "SQSQueueURL",
            value=queue.queue_url,
            description="SQS URL",
            export_name=f"{id}-SQSQueueURL",
        )

        environment.update({
            "REGION": self.region,
            "QUEUE_NAME": queue.queue_name
        })

        topic.add_subscription(sns_sub.SqsSubscription(queue))

        fargate_task_definition = ecs.FargateTaskDefinition(
            self,
            "FargateTaskDefinition",
            memory_limit_mib=memory,
            cpu=cpu,
        )
        log_driver = ecs.AwsLogDriver(
            stream_prefix=f"/ecs/tilebot/{id}",
            log_retention=aws_logs.RetentionDays.ONE_WEEK,
        )

        fargate_task_definition.add_container(
            "FargateContainer",
            image=ecs.ContainerImage.from_asset(directory="./"),
            entry_point=entrypoint,
            environment=environment,
            logging=log_driver,
        )

        fargate_service = ecs.FargateService(
            self,
            "FargateService",
            cluster=cluster,
            task_definition=fargate_task_definition,
            desired_count=mincount,
            enable_ecs_managed_tags=True,
            assign_public_ip=True,
        )
        permissions.append(
            iam.PolicyStatement(
                actions=["sqs:*"],
                resources=[queue.queue_arn],
            ))
        for perm in permissions:
            fargate_service.task_definition.task_role.add_to_policy(perm)
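        # The task role ends up with full SQS access scoped to the work queue,
        # plus whatever extra statements were passed in through `permissions`.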

        total_number_of_message_lambda = aws_lambda.Function(
            self,
            f"{id}-TotalMessagesLambda",
            description="Create TotalNumberOfMessage metrics",
            code=aws_lambda.Code.from_inline("""const AWS = require('aws-sdk');
    exports.handler = function(event, context, callback) {
    const sqs = new AWS.SQS({ region: process.env.AWS_DEFAULT_REGION });
    const cw = new AWS.CloudWatch({ region: process.env.AWS_DEFAULT_REGION });
    return sqs.getQueueAttributes({
        QueueUrl: process.env.SQS_QUEUE_URL,
        AttributeNames: ['ApproximateNumberOfMessagesNotVisible', 'ApproximateNumberOfMessages']
    }).promise()
    .then((attrs) => {
        return cw.putMetricData({
            Namespace: 'AWS/SQS',
            MetricData: [{
            MetricName: 'TotalNumberOfMessages',
            Dimensions: [{ Name: 'QueueName', Value: process.env.SQS_QUEUE_NAME }],
            Value: Number(attrs.Attributes.ApproximateNumberOfMessagesNotVisible) +
                    Number(attrs.Attributes.ApproximateNumberOfMessages)
            }]
        }).promise();
    })
    .then((metric) => callback(null, metric))
    .catch((err) => callback(err));
};"""),
            handler="index.handler",
            runtime=aws_lambda.Runtime.NODEJS_10_X,
            timeout=core.Duration.seconds(60),
            environment={
                "SQS_QUEUE_URL": queue.queue_url,
                "SQS_QUEUE_NAME": queue.queue_name,
            },
        )
        total_number_of_message_lambda.add_to_role_policy(
            iam.PolicyStatement(
                actions=["sqs:GetQueueAttributes"],
                resources=[queue.queue_arn],
            ))
        total_number_of_message_lambda.add_to_role_policy(
            iam.PolicyStatement(
                actions=["cloudwatch:PutMetricData"],
                resources=["*"],
            ))
        total_number_of_message_lambda.add_to_role_policy(
            iam.PolicyStatement(
                actions=["logs:*"],
                resources=["arn:aws:logs:*:*:*"],
            ))

        rule = aws_events.Rule(
            self,
            "TotalMessagesSchedule",
            schedule=aws_events.Schedule.rate(core.Duration.seconds(60)),
        )
        rule.add_target(
            aws_events_targets.LambdaFunction(total_number_of_message_lambda))
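        # The schedule invokes the Lambda every 60 seconds so the custom
        # TotalNumberOfMessages metric (visible + in-flight messages) stays
        # fresh for the scale-down alarm defined below.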

        scalable_target = auto_scale.ScalableTarget(
            self,
            "AutoScallingTarget",
            min_capacity=mincount,
            max_capacity=maxcount,
            service_namespace=auto_scale.ServiceNamespace.ECS,
            resource_id="/".join([
                "service", cluster.cluster_name, fargate_service.service_name
            ]),
            scalable_dimension="ecs:service:DesiredCount",
        )
        scalable_target.node.add_dependency(fargate_service)

        scale_up = auto_scale.CfnScalingPolicy(
            self,
            "ScaleUp",
            policy_name="PolicyScaleUp",
            policy_type="StepScaling",
            scaling_target_id=scalable_target.scalable_target_id,
            step_scaling_policy_configuration=auto_scale.CfnScalingPolicy.
            StepScalingPolicyConfigurationProperty(
                adjustment_type="ChangeInCapacity",
                cooldown=120,
                metric_aggregation_type="Maximum",
                step_adjustments=[
                    auto_scale.CfnScalingPolicy.StepAdjustmentProperty(
                        scaling_adjustment=scaling_steps,
                        metric_interval_lower_bound=0,
                    ),
                ],
            ),
        )
        scale_up_trigger = aws_cloudwatch.CfnAlarm(  # noqa
            self,
            "ScaleUpTrigger",
            alarm_description="Scale up due to visible messages in queue",
            dimensions=[
                aws_cloudwatch.CfnAlarm.DimensionProperty(
                    name="QueueName",
                    value=queue.queue_name,
                ),
            ],
            metric_name="ApproximateNumberOfMessagesVisible",
            namespace="AWS/SQS",
            evaluation_periods=1,
            comparison_operator="GreaterThanThreshold",
            period=60,
            statistic="Maximum",
            threshold=0,
            alarm_actions=[scale_up.ref],
        )
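        # The scale-up alarm fires as soon as at least one message is visible
        # (threshold 0, GreaterThanThreshold) and adds `scaling_steps` tasks
        # per trigger through the step policy above.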

        scale_down = auto_scale.CfnScalingPolicy(
            self,
            "ScaleDown",
            policy_name="PolicyScaleDown",
            policy_type="StepScaling",
            scaling_target_id=scalable_target.scalable_target_id,
            step_scaling_policy_configuration=auto_scale.CfnScalingPolicy.
            StepScalingPolicyConfigurationProperty(
                adjustment_type="ExactCapacity",
                cooldown=60,
                step_adjustments=[
                    auto_scale.CfnScalingPolicy.StepAdjustmentProperty(
                        scaling_adjustment=mincount,
                        metric_interval_upper_bound=0,
                    ),
                ],
            ),
        )

        scale_down_trigger = aws_cloudwatch.CfnAlarm(  # noqa
            self,
            "ScaleDownTrigger",
            alarm_description=
            "Scale down due to lack of in-flight messages in queue",
            dimensions=[
                aws_cloudwatch.CfnAlarm.DimensionProperty(
                    name="QueueName",
                    value=queue.queue_name,
                ),
            ],
            metric_name="TotalNumberOfMessages",
            namespace="AWS/SQS",
            evaluation_periods=1,
            comparison_operator="LessThanThreshold",
            period=120,
            statistic="Maximum",
            threshold=1,
            alarm_actions=[scale_down.ref],
        )
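        # When the custom TotalNumberOfMessages metric drops below 1, the
        # ExactCapacity policy above resets the service back to `mincount` tasks.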
Example 23
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here

        # ********* SNS Topics *************
        jobCompletionTopic = sns.Topic(self, "JobCompletion")

        # **********IAM Roles******************************
        textractServiceRole = iam.Role(
            self,
            "TextractServiceRole",
            assumed_by=iam.ServicePrincipal("textract.amazonaws.com"),
        )
        textractServiceRole.add_to_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                resources=[jobCompletionTopic.topic_arn],
                actions=["sns:Publish"],
            ))
        comprehendServiceRole = iam.Role(
            self,
            "ComprehendServiceRole",
            assumed_by=iam.ServicePrincipal("comprehend.amazonaws.com"),
        )
        comprehendServiceRole.add_to_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                resources=["*"],
                actions=[
                    "comprehend:*",
                    "s3:ListAllMyBuckets",
                    "s3:ListBucket",
                    "s3:GetBucketLocation",
                    "iam:ListRoles",
                    "iam:GetRole",
                ],
            ))
        # **********S3 Batch Operations Role******************************
        s3BatchOperationsRole = iam.Role(
            self,
            "S3BatchOperationsRole",
            assumed_by=iam.ServicePrincipal(
                "batchoperations.s3.amazonaws.com"),
        )

        # **********S3 Bucket******************************
        # S3 bucket for input documents and output
        contentBucket = s3.Bucket(self, "DocumentsBucket", versioned=False)

        existingContentBucket = s3.Bucket(self,
                                          "ExistingDocumentsBucket",
                                          versioned=False)
        existingContentBucket.grant_read_write(s3BatchOperationsRole)

        inventoryAndLogsBucket = s3.Bucket(self,
                                           "InventoryAndLogsBucket",
                                           versioned=False)
        inventoryAndLogsBucket.grant_read_write(s3BatchOperationsRole)

        # **********DynamoDB Table*************************
        # DynamoDB table with links to output in S3
        outputTable = dynamodb.Table(
            self,
            "OutputTable",
            partition_key={
                "name": "documentId",
                "type": dynamodb.AttributeType.STRING,
            },
            sort_key={
                "name": "outputType",
                "type": dynamodb.AttributeType.STRING,
            },
        )

        # DynamoDB table with links to output in S3
        documentsTable = dynamodb.Table(
            self,
            "DocumentsTable",
            partition_key={
                "name": "documentId",
                "type": dynamodb.AttributeType.STRING,
            },
            stream=dynamodb.StreamViewType.NEW_IMAGE,
        )

        # **********SQS Queues*****************************
        # DLQ (Dead Letter Queue)
        dlq = sqs.Queue(
            self,
            "DLQ",
            visibility_timeout=core.Duration.seconds(30),
            retention_period=core.Duration.seconds(1209600),
        )

        # Input Queue for sync jobs
        syncJobsQueue = sqs.Queue(
            self,
            "SyncJobs",
            visibility_timeout=core.Duration.seconds(30),
            retention_period=core.Duration.seconds(1209600),
            dead_letter_queue={
                "queue": dlq,
                "max_receive_count": 50
            },
        )
        # Input Queue for async jobs
        asyncJobsQueue = sqs.Queue(
            self,
            "AsyncJobs",
            visibility_timeout=core.Duration.seconds(30),
            retention_period=core.Duration.seconds(1209600),
            dead_letter_queue={
                "queue": dlq,
                "max_receive_count": 50
            },
        )

        # Queue
        jobResultsQueue = sqs.Queue(
            self,
            "JobResults",
            visibility_timeout=core.Duration.seconds(900),
            retention_period=core.Duration.seconds(1209600),
            dead_letter_queue={
                "queue": dlq,
                "max_receive_count": 50
            },
        )
        # Trigger
        # jobCompletionTopic.subscribeQueue(jobResultsQueue)
        jobCompletionTopic.add_subscription(
            snsSubscriptions.SqsSubscription(jobResultsQueue))
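        # Textract publishes job-completion notifications to this topic (the
        # async processor passes SNS_TOPIC_ARN / SNS_ROLE_ARN when starting jobs),
        # and the subscription delivers them to the job results queue.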

        # **********Lambda Functions******************************

        # Helper Layer with helper functions
        helperLayer = _lambda.LayerVersion(
            self,
            "HelperLayer",
            code=_lambda.Code.from_asset("awscdk/lambda/helper"),
            compatible_runtimes=[_lambda.Runtime.PYTHON_3_7],
            license="Apache-2.0",
            description="Helper layer.",
        )

        # Textractor helper layer
        textractorLayer = _lambda.LayerVersion(
            self,
            "Textractor",
            code=_lambda.Code.from_asset("awscdk/lambda/textractor"),
            compatible_runtimes=[_lambda.Runtime.PYTHON_3_7],
            license="Apache-2.0",
            description="Textractor layer.",
        )

        # -----------------------------------------------------------

        # S3 Event processor
        s3Processor = _lambda.Function(
            self,
            "S3Processor",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.from_asset("awscdk/lambda/s3processor"),
            handler="lambda_function.lambda_handler",
            environment={
                "SYNC_QUEUE_URL": syncJobsQueue.queue_url,
                "ASYNC_QUEUE_URL": asyncJobsQueue.queue_url,
                "DOCUMENTS_TABLE": documentsTable.table_name,
                "OUTPUT_TABLE": outputTable.table_name,
            },
        )
        # Layer
        s3Processor.add_layers(helperLayer)
        # Trigger
        s3Processor.add_event_source(
            S3EventSource(contentBucket, events=[s3.EventType.OBJECT_CREATED]))
        # Permissions
        documentsTable.grant_read_write_data(s3Processor)
        syncJobsQueue.grant_send_messages(s3Processor)
        asyncJobsQueue.grant_send_messages(s3Processor)

        # ------------------------------------------------------------

        # S3 Batch Operations Event processor
        s3BatchProcessor = _lambda.Function(
            self,
            "S3BatchProcessor",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.from_asset("awscdk/lambda/s3batchprocessor"),
            handler="lambda_function.lambda_handler",
            environment={
                "DOCUMENTS_TABLE": documentsTable.table_name,
                "OUTPUT_TABLE": outputTable.table_name,
            },
            reserved_concurrent_executions=1,
        )
        # Layer
        s3BatchProcessor.add_layers(helperLayer)
        # Permissions
        documentsTable.grant_read_write_data(s3BatchProcessor)
        s3BatchProcessor.grant_invoke(s3BatchOperationsRole)
        s3BatchOperationsRole.add_to_policy(
            iam.PolicyStatement(actions=["lambda:*"], resources=["*"]))

        # ------------------------------------------------------------

        # Document processor (Router to Sync/Async Pipeline)
        documentProcessor = _lambda.Function(
            self,
            "TaskProcessor",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.from_asset("awscdk/lambda/documentprocessor"),
            handler="lambda_function.lambda_handler",
            environment={
                "SYNC_QUEUE_URL": syncJobsQueue.queue_url,
                "ASYNC_QUEUE_URL": asyncJobsQueue.queue_url,
            },
        )
        # Layer
        documentProcessor.add_layers(helperLayer)
        # Trigger
        documentProcessor.add_event_source(
            DynamoEventSource(
                documentsTable,
                starting_position=_lambda.StartingPosition.TRIM_HORIZON,
            ))
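        # New items written to documentsTable arrive on its NEW_IMAGE stream and
        # trigger the router, which forwards work to the sync or async queue.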

        # Permissions
        documentsTable.grant_read_write_data(documentProcessor)
        syncJobsQueue.grant_send_messages(documentProcessor)
        asyncJobsQueue.grant_send_messages(documentProcessor)

        # ------------------------------------------------------------

        # Sync Jobs Processor (Process jobs using sync APIs)
        syncProcessor = _lambda.Function(
            self,
            "SyncProcessor",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.from_asset("awscdk/lambda/documentprocessor"),
            handler="lambda_function.lambda_handler",
            environment={
                "OUTPUT_TABLE": outputTable.table_name,
                "DOCUMENTS_TABLE": documentsTable.table_name,
                "AWS_DATA_PATH": "models",
            },
            reserved_concurrent_executions=1,
            timeout=core.Duration.seconds(25),
        )
        # Layer
        syncProcessor.add_layers(helperLayer)
        syncProcessor.add_layers(textractorLayer)
        # Trigger
        syncProcessor.add_event_source(
            SqsEventSource(syncJobsQueue, batch_size=1))
        # Permissions
        contentBucket.grant_read_write(syncProcessor)
        existingContentBucket.grant_read_write(syncProcessor)
        outputTable.grant_read_write_data(syncProcessor)
        documentsTable.grant_read_write_data(syncProcessor)
        syncProcessor.add_to_role_policy(
            iam.PolicyStatement(actions=["textract:*"], resources=["*"]))

        # ------------------------------------------------------------

        # Async Job Processor (Start jobs using Async APIs)
        asyncProcessor = _lambda.Function(
            self,
            "ASyncProcessor",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.from_asset("awscdk/lambda/asyncprocessor"),
            handler="lambda_function.lambda_handler",
            environment={
                "ASYNC_QUEUE_URL": asyncJobsQueue.queue_url,
                "SNS_TOPIC_ARN": jobCompletionTopic.topic_arn,
                "SNS_ROLE_ARN": textractServiceRole.role_arn,
                "AWS_DATA_PATH": "models",
            },
            reserved_concurrent_executions=1,
            timeout=core.Duration.seconds(60),
        )
        # asyncProcessor.addEnvironment("SNS_TOPIC_ARN", textractServiceRole.topic_arn)
        # Layer
        asyncProcessor.add_layers(helperLayer)
        # Triggers
        # Run the async job processor on a schedule (every 2 minutes below)
        rule = events.Rule(
            self,
            "Rule",
            schedule=events.Schedule.expression("rate(2 minutes)"))
        rule.add_target(LambdaFunction(asyncProcessor))

        # Run when a job is successfully complete
        asyncProcessor.add_event_source(SnsEventSource(jobCompletionTopic))
        # Permissions
        contentBucket.grant_read(asyncProcessor)
        existingContentBucket.grant_read_write(asyncProcessor)
        asyncJobsQueue.grant_consume_messages(asyncProcessor)
        asyncProcessor.add_to_role_policy(
            iam.PolicyStatement(
                actions=["iam:PassRole"],
                resources=[textractServiceRole.role_arn],
            ))
        asyncProcessor.add_to_role_policy(
            iam.PolicyStatement(actions=["textract:*"], resources=["*"]))
        # ------------------------------------------------------------

        # Async Jobs Results Processor
        jobResultProcessor = _lambda.Function(
            self,
            "JobResultProcessor",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.from_asset("awscdk/lambda/jobresultprocessor"),
            handler="lambda_function.lambda_handler",
            memory_size=2000,
            reserved_concurrent_executions=50,
            timeout=core.Duration.seconds(900),
            environment={
                "OUTPUT_TABLE": outputTable.table_name,
                "DOCUMENTS_TABLE": documentsTable.table_name,
                "AWS_DATA_PATH": "models",
            },
        )
        # Layer
        jobResultProcessor.add_layers(helperLayer)
        jobResultProcessor.add_layers(textractorLayer)
        # Triggers
        jobResultProcessor.add_event_source(
            SqsEventSource(jobResultsQueue, batch_size=1))
        # Permissions
        outputTable.grant_read_write_data(jobResultProcessor)
        documentsTable.grant_read_write_data(jobResultProcessor)
        contentBucket.grant_read_write(jobResultProcessor)
        existingContentBucket.grant_read_write(jobResultProcessor)
        jobResultProcessor.add_to_role_policy(
            iam.PolicyStatement(actions=["textract:*", "comprehend:*"],
                                resources=["*"]))

        # --------------
        # PDF Generator
        pdfGenerator = _lambda.Function(
            self,
            "PdfGenerator",
            runtime=_lambda.Runtime.JAVA_8,
            code=_lambda.Code.from_asset("awscdk/lambda/pdfgenerator"),
            handler="DemoLambdaV2::handleRequest",
            memory_size=3000,
            timeout=core.Duration.seconds(900),
        )
        contentBucket.grant_read_write(pdfGenerator)
        existingContentBucket.grant_read_write(pdfGenerator)
        pdfGenerator.grant_invoke(syncProcessor)
        pdfGenerator.grant_invoke(asyncProcessor)
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        ddb_whats_new_podcast = _ddb.Table(
            self,
            'whats-new-podcast-table',
            partition_key={
                'name': 'guid',
                'type': _ddb.AttributeType.STRING
            },
            table_name='whats-new-podcast',
        )

        ddb_script = _ddb.Table(self,
                                'whats-new-podcast-script',
                                partition_key={
                                    'name': 'date',
                                    'type': _ddb.AttributeType.STRING
                                },
                                table_name='whats-new-podcast-script')

        sqs_whats_new_podcast = _sqs.Queue(
            self,
            'whats-new-podcast-queue',
            queue_name='whats-new-podcast-queue',
        )

        sns_whats_new_podcast = _sns.Topic(
            self,
            'whats-new-podcast-topic',
            display_name='AWS News',
            topic_name='whats-new-podcast-topic',
        )
        sns_whats_new_podcast.add_subscription(
            _sns_subs.SqsSubscription(sqs_whats_new_podcast))

        s3_podcast_bucket = _s3.Bucket(
            self,
            "whats-new-podcast-bucket",
            bucket_name=
            f"whats-new-podcast-bucket-{random.randrange(1000000000)}")

        statement_sns_publish = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            actions=['sns:Publish'],
            resources=[sns_whats_new_podcast.topic_arn])
        statement_dynamodb = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            actions=['dynamodb:PutItem', 'dynamodb:GetItem'],
            resources=[ddb_whats_new_podcast.table_arn])

        lambda_rss_to_sns = _lambda.Function(
            self,
            'whats-new-podcast-rss-to-sns',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset('lambda/rss-to-sns'),
            handler='rss-to-sns.lambda_handler',
            environment={
                'DYNAMO_DB_TABLE': ddb_whats_new_podcast.table_name,
                'SNS_TOPIC': sns_whats_new_podcast.topic_arn
            },
            function_name='whats-new-podcast-rss-to-sns',
            timeout=core.Duration.seconds(30),
        )
        lambda_rss_to_sns.add_to_role_policy(statement_dynamodb)
        lambda_rss_to_sns.add_to_role_policy(statement_sns_publish)

        rule_rss = _events.Rule(self,
                                'whats-new-podcast-rss-rule',
                                schedule=_events.Schedule.cron(minute='0/15',
                                                               hour='1-23/2',
                                                               month='*',
                                                               week_day='*',
                                                               year='*'),
                                rule_name='whats-new-podcast-rss-rule')
        rule_rss.add_target(_targets.LambdaFunction(lambda_rss_to_sns))
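        # The cron above runs the RSS poller every 15 minutes during the odd
        # UTC hours (1, 3, ..., 23).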

        lambda_generate_script = _lambda.Function(
            self,
            'whats-new-podcast-generate-script',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset('lambda/generate-script'),
            handler='generate-script.lambda_handler',
            environment={'DYNAMO_DB_TABLE': ddb_script.table_name},
            function_name='whats-new-podcast-generate-script',
            timeout=core.Duration.seconds(30),
        )
        statement_lambda_generate_script_ddb = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            actions=[
                'dynamodb:PutItem',
                'dynamodb:GetItem',
                'dynamodb:Query',
                'dynamodb:UpdateItem',
            ],
            resources=[ddb_script.table_arn])
        lambda_generate_script.add_to_role_policy(
            statement_lambda_generate_script_ddb)
        lambda_generate_script_source_queue = _lambda_events.SqsEventSource(
            queue=sqs_whats_new_podcast, batch_size=1)
        lambda_generate_script.add_event_source(
            lambda_generate_script_source_queue)

        lambda_generate_voice = _lambda.Function(
            self,
            'whats-new-podcast-generate-voice',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset('lambda/generate-voice'),
            handler='generate-voice.lambda_handler',
            environment={
                'DYNAMO_DB_TABLE': ddb_script.table_name,
                'S3_BUCKET': s3_podcast_bucket.bucket_name
            },
            function_name='whats-new-podcast-generate-voice',
            timeout=core.Duration.seconds(30),
        )
        statement_lambda_generate_voice_ddb = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            actions=[
                'dynamodb:GetItem',
                'dynamodb:Query',
            ],
            resources=[ddb_script.table_arn])
        statement_lambda_generate_voice_polly = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            actions=[
                'polly:SynthesizeSpeech', 'polly:StartSpeechSynthesisTask'
            ],
            resources=['*'])
        statement_lambda_generate_voice_s3 = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            actions=['s3:PutObject'],
            resources=[f"{s3_podcast_bucket.bucket_arn}/*"])
        lambda_generate_voice.add_to_role_policy(
            statement_lambda_generate_voice_ddb)
        lambda_generate_voice.add_to_role_policy(
            statement_lambda_generate_voice_polly)
        lambda_generate_voice.add_to_role_policy(
            statement_lambda_generate_voice_s3)

        rule_voice = _events.Rule(
            self,
            'whats-new-podcast-generate-voice-rule',
            schedule=_events.Schedule.cron(minute='30',
                                           hour='0',
                                           month='*',
                                           week_day='*',
                                           year='*'),
            rule_name='whats-new-podcast-generate-voice-rule')
        rule_voice.add_target(_targets.LambdaFunction(lambda_generate_voice))
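        # generate-voice runs once a day at 00:30 UTC; its role can read the
        # script table, call Polly, and write the synthesized audio to the bucket.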
Example 25
    def create_all_queues(self) -> None:
        """
        Create all STACK queues, attach subscriptions and alarms
        """

        # General DLQs for lambdas (not API)
        self.create_queue(id="dead_letter_queue")
        general_dlq_alarm = cloudwatch.Alarm(
            self,
            "DLQAlarm",
            metric=self.queues_["dead_letter_queue"].metric(
                "ApproximateNumberOfMessagesVisible"),
            evaluation_periods=1,
            threshold=0.0,
            comparison_operator=ComparisonOperator.GREATER_THAN_THRESHOLD,
        )
        general_dlq_alarm.add_alarm_action(
            cw_actions.SnsAction(self.topics_["alarm_topic"]))
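        # Any message landing in the general DLQ (visible count > 0) raises the
        # alarm and notifies the alarm topic.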

        # DLQ for API lambdas
        self.create_queue(id="api_dead_letter_queue")
        api_dlq_alarm = cloudwatch.Alarm(
            self,
            "APIDLQAlarm",
            metric=self.queues_["api_dead_letter_queue"].metric(
                "ApproximateNumberOfMessagesVisible"),
            evaluation_periods=1,
            threshold=0.0,
            comparison_operator=ComparisonOperator.GREATER_THAN_THRESHOLD,
        )
        api_dlq_alarm.add_alarm_action(
            cw_actions.SnsAction(self.topics_["alarm_topic"]))

        # The new_scenes_queue subscribes to CBERS 4/4A quicklook notifications. The
        # STAC items are generated from the original INPE metadata file as
        # soon as the quicklooks are created in the PDS bucket.
        # This code fragment creates the queue and its associated DLQ, and
        # subscribes to the CBERS 4/4A quicklook notification topics.
        self.create_queue(
            id="process_new_scenes_queue_dlq",
            retention_period=core.Duration.seconds(1209600),
        )
        process_new_scenes_queue_alarm = cloudwatch.Alarm(
            self,
            "ProcessNewScenesQueueAlarm",
            metric=self.queues_["process_new_scenes_queue_dlq"].metric(
                "ApproximateNumberOfMessagesVisible"),
            evaluation_periods=1,
            threshold=0.0,
            comparison_operator=ComparisonOperator.GREATER_THAN_THRESHOLD,
        )
        process_new_scenes_queue_alarm.add_alarm_action(
            cw_actions.SnsAction(self.topics_["alarm_topic"]))
        self.create_queue(
            id="new_scenes_queue",
            visibility_timeout=core.Duration.seconds(385),
            retention_period=core.Duration.seconds(1209600),
            dead_letter_queue=sqs.DeadLetterQueue(
                max_receive_count=1,
                queue=self.queues_["process_new_scenes_queue_dlq"]),
        )
        # Add subscriptions for each CB4 camera
        sns.Topic.from_topic_arn(
            self,
            id="CB4MUX",
            topic_arn="arn:aws:sns:us-east-1:599544552497:NewCB4MUXQuicklook",
        ).add_subscription(
            sns_subscriptions.SqsSubscription(
                self.queues_["new_scenes_queue"]))
        sns.Topic.from_topic_arn(
            self,
            id="CB4AWFI",
            topic_arn="arn:aws:sns:us-east-1:599544552497:NewCB4AWFIQuicklook",
        ).add_subscription(
            sns_subscriptions.SqsSubscription(
                self.queues_["new_scenes_queue"]))
        sns.Topic.from_topic_arn(
            self,
            id="CB4PAN10M",
            topic_arn=
            "arn:aws:sns:us-east-1:599544552497:NewCB4PAN10MQuicklook",
        ).add_subscription(
            sns_subscriptions.SqsSubscription(
                self.queues_["new_scenes_queue"]))
        sns.Topic.from_topic_arn(
            self,
            id="CBPAN5M",
            topic_arn="arn:aws:sns:us-east-1:599544552497:NewCB4PAN5MQuicklook",
        ).add_subscription(
            sns_subscriptions.SqsSubscription(
                self.queues_["new_scenes_queue"]))
        # Subscription for CB4A (all cameras)
        sns.Topic.from_topic_arn(
            self,
            id="CB4A",
            topic_arn="arn:aws:sns:us-east-1:599544552497:NewCB4AQuicklook",
        ).add_subscription(
            sns_subscriptions.SqsSubscription(
                self.queues_["new_scenes_queue"]))

        self.create_queue(
            id="catalog_prefix_update_queue",
            visibility_timeout=core.Duration.seconds(60),
            retention_period=core.Duration.seconds(1209600),
            dead_letter_queue=sqs.DeadLetterQueue(
                max_receive_count=3, queue=self.queues_["dead_letter_queue"]),
        )

        # Reconcile queue for INPE's XML metadata
        self.create_queue(
            id="consume_reconcile_queue_dlq",
            retention_period=core.Duration.seconds(1209600),
        )
        consume_reconcile_queue_alarm = cloudwatch.Alarm(
            self,
            "ConsumeReconcileQueueAlarm",
            metric=self.queues_["consume_reconcile_queue_dlq"].metric(
                "ApproximateNumberOfMessagesVisible"),
            evaluation_periods=1,
            threshold=0.0,
            comparison_operator=ComparisonOperator.GREATER_THAN_THRESHOLD,
        )
        consume_reconcile_queue_alarm.add_alarm_action(
            cw_actions.SnsAction(self.topics_["alarm_topic"]))
        self.create_queue(
            id="reconcile_queue",
            visibility_timeout=core.Duration.seconds(1000),
            retention_period=core.Duration.seconds(1209600),
            dead_letter_queue=sqs.DeadLetterQueue(
                max_receive_count=3,
                queue=self.queues_["consume_reconcile_queue_dlq"]),
        )

        # Reconcile queue for STAC items
        self.create_queue(
            id="consume_stac_reconcile_queue_dlq",
            retention_period=core.Duration.seconds(1209600),
        )
        consume_stac_reconcile_queue_alarm = cloudwatch.Alarm(
            self,
            "ConsumeStacReconcileQueueAlarm",
            metric=self.queues_["consume_stac_reconcile_queue_dlq"].metric(
                "ApproximateNumberOfMessagesVisible"),
            evaluation_periods=1,
            threshold=0.0,
            comparison_operator=ComparisonOperator.GREATER_THAN_THRESHOLD,
        )
        consume_stac_reconcile_queue_alarm.add_alarm_action(
            cw_actions.SnsAction(self.topics_["alarm_topic"]))
        self.create_queue(
            id="stac_reconcile_queue",
            visibility_timeout=core.Duration.seconds(1000),
            retention_period=core.Duration.seconds(1209600),
            dead_letter_queue=sqs.DeadLetterQueue(
                max_receive_count=3,
                queue=self.queues_["consume_stac_reconcile_queue_dlq"],
            ),
        )

        # Queue for STAC items to be inserted into Elasticsearch. Subscribe to the
        # topic with new stac items
        self.create_queue(
            id="insert_into_elasticsearch_queue",
            visibility_timeout=core.Duration.seconds(180),
            retention_period=core.Duration.seconds(1209600),
            dead_letter_queue=sqs.DeadLetterQueue(
                max_receive_count=3, queue=self.queues_["dead_letter_queue"]),
        )
        # Subscription for new item topics
        self.topics_["stac_item_topic"].add_subscription(
            sns_subscriptions.SqsSubscription(
                self.queues_["insert_into_elasticsearch_queue"]))
        # Subscription for reconciled item topics
        self.topics_["reconcile_stac_item_topic"].add_subscription(
            sns_subscriptions.SqsSubscription(
                self.queues_["insert_into_elasticsearch_queue"]))

        # Backup queue for STAC items inserted into Elasticsearch.
        # This holds the same items received by "insert_into_elasticsearch_queue",
        # simply retaining them for some time to allow recovery from ES
        # cluster failures (see #78).
        # This queue subscribes only to new item topics.
        self.create_queue(
            id="backup_insert_into_elasticsearch_queue",
            visibility_timeout=core.Duration.seconds(180),
            retention_period=core.Duration.days(
                settings.backup_queue_retention_days),
            dead_letter_queue=sqs.DeadLetterQueue(
                max_receive_count=3, queue=self.queues_["dead_letter_queue"]),
        )
        # Subscription for new item topics
        self.topics_["stac_item_topic"].add_subscription(
            sns_subscriptions.SqsSubscription(
                self.queues_["backup_insert_into_elasticsearch_queue"]))
Example 26
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        queue = sqs.Queue(
            self,
            "cnfqueue",
            visibility_timeout=core.Duration.seconds(300),
        )

        topic = sns.Topic(self, "cnftopic")

        az = "us-east-1a,us-east-1b,us-east-1c"
        cidr = ec2.CfnVPCCidrBlock(self,
                                   "cnfCidr",
                                   vpc_id="vpc-0948e89bfc65a24c2",
                                   cidr_block="10.124.0.0/22")

        # print(len(az.split(",")))
        if len(az.split(",")) == 3:
            sub1 = ec2.CfnSubnet(self,
                                 f"cnfnet1",
                                 availability_zone=str(az.split(",")[0]),
                                 cidr_block="10.124.0.0/24",
                                 vpc_id="vpc-0948e89bfc65a24c2")
            sub2 = ec2.CfnSubnet(self,
                                 f"cnfnet2",
                                 availability_zone=str(az.split(",")[1]),
                                 cidr_block="10.124.1.0/24",
                                 vpc_id="vpc-0948e89bfc65a24c2")
            sub3 = ec2.CfnSubnet(self,
                                 f"cnfnet3",
                                 availability_zone=str(az.split(",")[2]),
                                 cidr_block="10.124.2.0/25",
                                 vpc_id="vpc-0948e89bfc65a24c2")

            sub1.add_depends_on(cidr)
            sub2.add_depends_on(cidr)
            sub3.add_depends_on(cidr)

            # ec2.CfnSubnetRouteTableAssociation(self, "cnfnet1ass", "rtb-0acd4a7a6ab02e9d3", sub1.logical_id())
        else:
            sub1 = ec2.CfnSubnet(self,
                                 f"cnfnet1",
                                 availability_zone=str(az.split(",")[0]),
                                 cidr_block="10.124.0.0/24",
                                 vpc_id="vpc-0948e89bfc65a24c2")
            sub2 = ec2.CfnSubnet(self,
                                 f"cnfnet2",
                                 availability_zone=str(az.split(",")[1]),
                                 cidr_block="10.124.1.0/24",
                                 vpc_id="vpc-0948e89bfc65a24c2")

            sub1.add_depends_on(cidr)
            sub2.add_depends_on(cidr)

        count = 1
        ssmparam = ssm.StringParameter(self,
                                       f"subnet{count}",
                                       allowed_pattern=".*",
                                       description="The value Foo",
                                       parameter_name=f"FooParameter{count}",
                                       string_value="Foo",
                                       tier=ssm.ParameterTier.ADVANCED)

        # co=0
        # while co < 3:
        #     repository = ecr.Repository(self, f"Repository{co}")
        #     co=co+1

        topic.add_subscription(subs.SqsSubscription(queue))
Example 27
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        ###
        # SNS Topic Creation
        # Our API Gateway posts messages directly to this
        ###
        topic = sns.Topic(self,
                          'theBigFanTopic',
                          display_name='The Big Fan CDK Pattern Topic')

        ###
        # SQS subscriber creation for our SNS Topic
        # 2 subscribers: one for messages with a status of "created", one for any other message
        ###

        # Status:created SNS Subscriber Queue
        created_status_queue = sqs.Queue(
            self,
            'BigFanTopicStatusCreatedSubscriberQueue',
            visibility_timeout=core.Duration.seconds(300),
            queue_name='BigFanTopicStatusCreatedSubscriberQueue')

        # Only send messages to our created_status_queue with a status of created
        created_filter = sns.SubscriptionFilter.string_filter(
            whitelist=['created'])
        topic.add_subscription(
            subscriptions.SqsSubscription(
                created_status_queue,
                raw_message_delivery=True,
                filter_policy={'status': created_filter}))
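        # The filter policy is evaluated against the "status" message attribute,
        # so only messages published with status=created reach this queue;
        # raw_message_delivery strips the SNS envelope from the payload.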

        # Any other status SNS Subscriber Queue
        other_status_queue = sqs.Queue(
            self,
            'BigFanTopicAnyOtherStatusSubscriberQueue',
            visibility_timeout=core.Duration.seconds(300),
            queue_name='BigFanTopicAnyOtherStatusSubscriberQueue')

        # Only send messages to our other_status_queue that do not have a status of created
        other_filter = sns.SubscriptionFilter.string_filter(
            blacklist=['created'])
        topic.add_subscription(
            subscriptions.SqsSubscription(
                other_status_queue,
                raw_message_delivery=True,
                filter_policy={'status': other_filter}))

        ###
        # Creation of Lambdas that subscribe to above SQS queues
        ###

        # Created status queue lambda
        sqs_created_status_subscriber = _lambda.Function(
            self,
            "SQSCreatedStatusSubscribeLambdaHandler",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="createdStatus.handler",
            code=_lambda.Code.from_asset("lambda_fns/subscribe"))
        created_status_queue.grant_consume_messages(
            sqs_created_status_subscriber)
        sqs_created_status_subscriber.add_event_source(
            _event.SqsEventSource(created_status_queue))

        # Any other status queue lambda
        sqs_other_status_subscriber = _lambda.Function(
            self,
            "SQSAnyOtherStatusSubscribeLambdaHandler",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="anyOtherStatus.handler",
            code=_lambda.Code.from_asset("lambda_fns/subscribe"))
        other_status_queue.grant_consume_messages(sqs_other_status_subscriber)
        sqs_other_status_subscriber.add_event_source(
            _event.SqsEventSource(other_status_queue))

        ###
        # API Gateway Creation
        # This is complicated because it transforms the incoming JSON payload into a
        # query-string URL, which is used to post the payload to SNS without a Lambda in between
        ###

        gateway = api_gw.RestApi(
            self,
            'theBigFanAPI',
            deploy_options=api_gw.StageOptions(
                metrics_enabled=True,
                logging_level=api_gw.MethodLoggingLevel.INFO,
                data_trace_enabled=True,
                stage_name='prod'))

        # Give our gateway permissions to interact with SNS
        api_gw_sns_role = iam.Role(
            self,
            'DefaultLambdaHanderRole',
            assumed_by=iam.ServicePrincipal('apigateway.amazonaws.com'))
        topic.grant_publish(api_gw_sns_role)

        # shortening the lines of later code
        schema = api_gw.JsonSchema
        schema_type = api_gw.JsonSchemaType

        # Because this isn't a proxy integration, we need to define our response model
        response_model = gateway.add_model(
            'ResponseModel',
            content_type='application/json',
            model_name='ResponseModel',
            schema=schema(
                schema=api_gw.JsonSchemaVersion.DRAFT4,
                title='pollResponse',
                type=schema_type.OBJECT,
                properties={'message': schema(type=schema_type.STRING)}))

        error_response_model = gateway.add_model(
            'ErrorResponseModel',
            content_type='application/json',
            model_name='ErrorResponseModel',
            schema=schema(schema=api_gw.JsonSchemaVersion.DRAFT4,
                          title='errorResponse',
                          type=schema_type.OBJECT,
                          properties={
                              'state': schema(type=schema_type.STRING),
                              'message': schema(type=schema_type.STRING)
                          }))

        request_template = "Action=Publish&" + \
                           "TargetArn=$util.urlEncode('" + topic.topic_arn + "')&" + \
                           "Message=$util.urlEncode($input.path('$.message'))&" + \
                           "Version=2010-03-31&" + \
                           "MessageAttributes.entry.1.Name=status&" + \
                           "MessageAttributes.entry.1.Value.DataType=String&" + \
                           "MessageAttributes.entry.1.Value.StringValue=$util.urlEncode($input.path('$.status'))"

        # This is the VTL to transform the error response
        error_template = {
            "state": 'error',
            "message": "$util.escapeJavaScript($input.path('$.errorMessage'))"
        }
        error_template_string = json.dumps(error_template,
                                           separators=(',', ':'))

        # This is how our gateway chooses what response to send based on selection_pattern
        integration_options = api_gw.IntegrationOptions(
            credentials_role=api_gw_sns_role,
            request_parameters={
                'integration.request.header.Content-Type':
                "'application/x-www-form-urlencoded'"
            },
            request_templates={"application/json": request_template},
            passthrough_behavior=api_gw.PassthroughBehavior.NEVER,
            integration_responses=[
                api_gw.IntegrationResponse(
                    status_code='200',
                    response_templates={
                        "application/json":
                        json.dumps({"message": 'message added to topic'})
                    }),
                api_gw.IntegrationResponse(
                    selection_pattern=r"^\[Error\].*",
                    status_code='400',
                    response_templates={
                        "application/json": error_template_string
                    },
                    response_parameters={
                        'method.response.header.Content-Type':
                        "'application/json'",
                        'method.response.header.Access-Control-Allow-Origin':
                        "'*'",
                        'method.response.header.Access-Control-Allow-Credentials':
                        "'true'"
                    })
            ])
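        # The selection_pattern decides which integration response applies to a
        # backend reply; anything that does not match falls through to the
        # default 200 mapping.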

        # Add an SendEvent endpoint onto the gateway
        gateway.root.add_resource('SendEvent') \
            .add_method('POST', api_gw.Integration(type=api_gw.IntegrationType.AWS,
                                                   integration_http_method='POST',
                                                   uri='arn:aws:apigateway:us-east-1:sns:path//',
                                                   options=integration_options
                                                   ),
                        method_responses=[
                            api_gw.MethodResponse(status_code='200',
                                                  response_parameters={
                                                      'method.response.header.Content-Type': True,
                                                      'method.response.header.Access-Control-Allow-Origin': True,
                                                      'method.response.header.Access-Control-Allow-Credentials': True
                                                  },
                                                  response_models={
                                                      'application/json': response_model
                                                  }),
                            api_gw.MethodResponse(status_code='400',
                                                  response_parameters={
                                                      'method.response.header.Content-Type': True,
                                                      'method.response.header.Access-Control-Allow-Origin': True,
                                                      'method.response.header.Access-Control-Allow-Credentials': True
                                                  },
                                                  response_models={
                                                      'application/json': error_response_model
                                                  }),
                        ]
                        )