Example #1
    def create_s3(self,
                  id: str = guid('S3-'),
                  versioned: bool = False,
                  type: str = 's3'):
        """
        Creates an S3 bucket.
        :param id: Logical id of the bucket construct.
        :param versioned: Whether to enable object versioning.
        :param type: s3 / Glacier / Glacier Deep Archive (not applied in the body below).
        :return: The bucket.
        """
        # Note: the guid('S3-') default is evaluated once, when the method is
        # defined, so pass an explicit id to get a fresh value per bucket.
        bucket = s3.Bucket(self,
                           id,
                           versioned=versioned,
                           cors=[
                               s3.CorsRule(
                                   allowed_methods=[s3.HttpMethods.GET],
                                   allowed_origins=['*'])
                           ])
        return bucket
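A hedged usage sketch for the helper above: the imports, the StorageStack name and the guid() helper are assumptions, not part of the example, and the comment flags the default-argument pitfall mentioned above.

# Hypothetical usage sketch (assumes aws_cdk v1 and a guid() helper defined elsewhere).
from aws_cdk import core, aws_s3 as s3

class StorageStack(core.Stack):
    # ... create_s3 defined here as shown in Example #1 ...

    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # Pass an explicit id: the guid('S3-') default is evaluated once, at
        # method-definition time, not once per call.
        uploads = self.create_s3(id='UploadsBucket', versioned=True)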
Example #2
def base_bucket(construct, **kwargs):
    """
    Function that generates an S3 Bucket.
    :param construct: Custom construct that will use this function; from the calling construct this is usually 'self'.
    :param kwargs: Expects bucket_name and public_read_access, plus optional versioned, cors and website settings.
    :return: S3 Bucket Construct.
    """
    bucket_name = construct.prefix + "-" + kwargs[
        "bucket_name"] + "-bucket-" + construct.environment_
    parsed_bucket_name = bucket_name.replace("_", "-")
    versioned = kwargs.get("versioned")
    public_read_access = kwargs["public_read_access"]
    cors_settings = kwargs.get("cors")
    website_error_document = kwargs.get("website", {}).get("error")
    website_index_document = kwargs.get("website", {}).get("index")

    if cors_settings is not None:
        allowed_methods = [
            value for value in list(s3.HttpMethods)
            if value.value in cors_settings["allowed_methods"]
        ]
        cors_settings = s3.CorsRule(
            allowed_methods=allowed_methods,
            allowed_origins=cors_settings["allowed_origins"])
        cors_settings = [cors_settings]

    bucket = s3.Bucket(
        construct,
        id=parsed_bucket_name,
        bucket_name=parsed_bucket_name,
        cors=cors_settings,
        versioned=versioned,
        website_error_document=website_error_document,
        website_index_document=website_index_document,
    )

    if public_read_access is True:
        bucket.grant_public_access()

    return bucket
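A possible call site for base_bucket, given as a hedged fragment: it assumes it runs inside a construct's __init__ where self exposes the .prefix and .environment_ attributes used above, and the kwarg values are illustrative only.

# Hypothetical call site inside a construct's __init__ (self must expose
# .prefix and .environment_ as assumed by base_bucket above).
assets_bucket = base_bucket(
    self,
    bucket_name="assets",
    versioned=True,
    public_read_access=False,
    cors={
        "allowed_methods": ["GET"],
        "allowed_origins": ["*"],
    },
    website={"index": "index.html", "error": "error.html"},
)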
Example #3
    def __init__(self, scope: core.Construct, id: str,
                 infra: RtspBaseResourcesConstruct, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create the inventory bucket...
        self.inventories = s3.Bucket(
            self,
            'InventoryBucket',
            bucket_name='homenet-{}.rtsp-inventories.{}.virtual.world'.format(
                infra.landing_zone.zone_name,
                core.Stack.of(self).region).lower(),
            removal_policy=core.RemovalPolicy.DESTROY,
            cors=[
                s3.CorsRule(allowed_methods=[s3.HttpMethods.GET],
                            allowed_origins=['*'])
            ],
            lifecycle_rules=[
                s3.LifecycleRule(
                    id='Transition-to-IA-after-30D',
                    prefix='eufy/',
                    abort_incomplete_multipart_upload_after=core.Duration.days(
                        7),
                    transitions=[
                        s3.Transition(
                            storage_class=s3.StorageClass.INFREQUENT_ACCESS,
                            transition_after=core.Duration.days(30))
                    ])
            ])

        # Create inventory collections for the Eufy Homebases...
        infra.bucket.add_inventory(
            objects_prefix='eufy/',
            inventory_id='{}-InventoryReport'.format('EufyFull'),
            format=s3.InventoryFormat.CSV,
            frequency=s3.InventoryFrequency.DAILY,
            include_object_versions=s3.InventoryObjectVersion.CURRENT,
            destination=s3.InventoryDestination(
                bucket=self.inventories,
                bucket_owner=core.Aws.ACCOUNT_ID,
                prefix=None))

        for base_name in ['Moonbase', 'Starbase']:
            prefix = 'eufy/{}.cameras.real.world/'.format(base_name).lower()
            infra.bucket.add_inventory(
                objects_prefix=prefix,
                inventory_id='{}-InventoryReport'.format(base_name),
                format=s3.InventoryFormat.CSV,
                frequency=s3.InventoryFrequency.DAILY,
                include_object_versions=s3.InventoryObjectVersion.CURRENT,
                destination=s3.InventoryDestination(
                    bucket=self.inventories,
                    bucket_owner=core.Aws.ACCOUNT_ID,
                    prefix=None))

        # Broadcast inventory creation events...
        self.inventoryAvailable = sns.Topic(
            self,
            'InventoryAvailable',
            display_name='HomeNet-{}-Rtsp-InventoryAvailable'.format(
                infra.landing_zone.zone_name),
            topic_name='HomeNet-{}-Rtsp-InventoryAvailable'.format(
                infra.landing_zone.zone_name))

        self.inventories.add_event_notification(
            s3.EventType.OBJECT_CREATED,
            s3n.SnsDestination(topic=self.inventoryAvailable),
            s3.NotificationKeyFilter(suffix='manifest.json'))

        # Attach debug queue to the notification
        self.inventoryAvailable.add_subscription(
            subs.SqsSubscription(
                raw_message_delivery=True,
                queue=sqs.Queue(
                    self,
                    'InventoryDebugQueue',
                    removal_policy=core.RemovalPolicy.DESTROY,
                    retention_period=core.Duration.days(7),
                    queue_name='HomeNet-{}-RtspInventoryAvailable_Debug'.format(
                        infra.landing_zone.zone_name))))

        # Subscribe the GroundTruth Manifest Generator
        groundtruth = RtspGroundTruthManifestGenerationFunction(
            self,
            'GroundTruthManifest',
            infra=infra,
            topic=self.inventoryAvailable)

        self.inventories.grant_read_write(groundtruth.function.role)

        # Create the RtspNormalizeImage S3 Object Lambda
        RtspNormalizeImageAccessPoint(scope=self,
                                      id='NormalizedImage',
                                      infra=infra)
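The RTSP snippets here (and Example #6 below) omit their imports. The module aliases they rely on are most likely the CDK v1 ones listed next; this is an assumption for readers who want to run them, and it does not cover project-local constructs such as RtspBaseResourcesConstruct or RtspCameraSecrets.

# Probable CDK v1 imports behind the aliases used in the RTSP examples (assumption).
from aws_cdk import (
    core,
    aws_s3 as s3,
    aws_s3_notifications as s3n,
    aws_sns as sns,
    aws_sns_subscriptions as subs,
    aws_sqs as sqs,
    aws_dynamodb as ddb,
    aws_logs as logs,
)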
Example #4
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        layer = aws.PipLayers(self,
                              "scoreboard_layer",
                              layers={
                                  "htmlgen": "htmlgen/requirements.txt",
                                  "parse_globals":
                                  "parse_globals/requirements.txt",
                                  "bot": "bot/requirements.txt"
                              })

        # Create
        # * the generator function
        # * Namemap-table
        #   * allow generator to read from namemap-table
        #   (This might change - why not pass the mapping structure in the message?)
        # * Datacache-bucket
        #   * Allow generator to read and write to bucket

        htmlgen = aws.Function(self,
                               "htmlgen",
                               layers=layer.layers,
                               timeout=core.Duration.seconds(20),
                               memory_size=1024)

        # id: str (boardid), name: str (username), value: str (replacement value)
        namemap = aws.Table(self,
                            "namemap",
                            sort_key=aws_dynamodb.Attribute(
                                name='name',
                                type=aws_dynamodb.AttributeType.STRING),
                            removal_policy=CONFIGDATA)
        namemap.grant_read_data(htmlgen)

        no_point_days = aws.Table(self, "nopointdays")

        # id: str (boardid), day: int, results_1: dict ({player: score, ...}), results_2: dict ({player: score, ...})
        globalscores = aws.Table(self,
                                 "globalscores",
                                 partition_key=aws_dynamodb.Attribute(
                                     name='year',
                                     type=aws_dynamodb.AttributeType.NUMBER),
                                 sort_key=aws_dynamodb.Attribute(
                                     name='day',
                                     type=aws_dynamodb.AttributeType.NUMBER),
                                 removal_policy=EPHEMERALDATA)
        parse_globals = aws.Function(self,
                                     "parse_globals",
                                     layers=layer.layers,
                                     timeout=core.Duration.seconds(20),
                                     memory_size=1024)
        parse_globals.add_environment("DDB_GLOBALSCORES",
                                      globalscores.table_name)
        globalscores.grant_read_write_data(parse_globals)
        globalscores.grant_read_data(htmlgen)

        timestamps = aws.Table(self,
                               "timestamps",
                               removal_policy=EPHEMERALDATA)
        htmlgen.add_environment("DDB_TIMESTAMPS", timestamps.table_name)
        timestamps.grant_write_data(htmlgen)

        datacache = aws.Bucket(self, "datacache")
        datacache.grant_read_write(htmlgen)

        htmlbucket = aws.Bucket(
            self,
            "html",
            removal_policy=EPHEMERALDATA,
            auto_delete_objects=True,
            block_public_access=None,
            website_error_document="error.html",
            website_index_document="scoreboard.html",
            cors=[
                aws_s3.CorsRule(allowed_methods=[aws_s3.HttpMethods.GET],
                                allowed_headers=["*"],
                                allowed_origins=["*"])
            ])
        htmlbucket.grant_public_access()
        core.CfnOutput(self,
                       f"{id}_bucketurl",
                       value=f"BUCKET_URL={htmlbucket.bucket_website_url}")
        htmlbucket.grant_read_write(htmlgen)
        htmlgen.add_environment("S3_DATACACHE", datacache.bucket_name)
        htmlgen.add_environment("S3_HTML", htmlbucket.bucket_name)
        htmlgen.add_environment("DDB_NAMEMAP", namemap.table_name)

        aws_s3_deployment.BucketDeployment(
            self,
            "StaticHtml",
            sources=[aws_s3_deployment.Source.asset("htmlgen/frontend")],
            destination_bucket=htmlbucket,
            prune=False)

        # Create
        # * spawner function
        # * boardconfig-table
        #   * allow spawner to read from boardconfig-table
        # * generator_queue
        #   allow spawner to post messages to queue
        spawner = aws.Function(self, "spawner", layers=layer.layers)
        boardconfig = aws.Table(
            self,
            "boardconfig",
            stream=aws_dynamodb.StreamViewType.NEW_AND_OLD_IMAGES,
            removal_policy=CONFIGDATA)
        boardconfig.grant_read_data(spawner)
        spawner.add_environment("DDB_CONFIG", boardconfig.table_name)
        spawner.add_environment("DDB_NOPOINTDAYS", no_point_days.table_name)

        boardconfig_source = aws_lambda_event_sources.DynamoEventSource(
            boardconfig, starting_position=aws_lambda.StartingPosition.LATEST)

        boarddeletions = aws.Function(self, "boarddeletions")
        boarddeletions.add_event_source(boardconfig_source)
        boarddeletions.add_environment("S3_HTML", htmlbucket.bucket_name)
        htmlbucket.grant_read_write(boarddeletions)

        generator_queue = aws.Queue(self, "generator_queue")
        generator_queue.grant_send_messages(spawner)
        spawner.add_environment("SQS_GENERATOR", generator_queue.queue_name)
        spawner.add_environment("DDB_TIMESTAMPS", timestamps.table_name)
        timestamps.grant_read_data(spawner)

        # Connect the generator_queue to the htmlgen-function
        event_source = aws_lambda_event_sources.SqsEventSource(generator_queue,
                                                               batch_size=10)
        htmlgen.add_event_source(event_source)

        # Admin API
        adminhandler = aws.Function(self, "adminhandler")
        adminhandlerApi = aws_apigateway.LambdaRestApi(self,
                                                       "adminapi",
                                                       handler=adminhandler)
        core.CfnOutput(self,
                       "root_url",
                       value=f"Admin URL={adminhandlerApi.url_for_path()}")
        adminhandler.add_environment("DDB_CONFIG", boardconfig.table_name)
        boardconfig.grant_read_write_data(adminhandler)

        # Slack API
        api = aws.RestApi(self, "slack")

        slack = aws.ResourceWithLambda(
            self,
            "bot",
            verb="POST",
            description="Handle incoming Slack-bot interaction",
            parent_resource=api.root,
            lambda_layers=[layer.idlayers["bot"]])
        slack.handler.add_environment(
            "BOT_TOKEN", read_token_from_file('slack_bot_token.txt'))
        slack.handler.add_environment(
            "BOT_VERIFICATION",
            read_token_from_file('slack_verification_token.txt'))
        # "xoxb-1033954193568-1654676166455-Vzom9aQY9NUjAYR5mhKZP70k")
        slack.handler.add_environment("DDB_CONFIG", boardconfig.table_name)
        slack.handler.add_environment("DDB_NAMEMAP", namemap.table_name)
        namemap.grant_read_write_data(slack.handler)
        boardconfig.grant_read_write_data(slack.handler)

        # aws.Rule(
        #     self,
        #     "Test",
        #     description="Remove after functions verified - Fire every minute for some duration in February",
        #     schedule=aws_events.Schedule.cron(minute="*", hour="*", week_day="2", month="FEB"),
        #     target=spawner)

        aws.Rule(self,
                 "RestOfYear",
                 description="Fire every week jan-novx",
                 schedule=aws_events.Schedule.cron(minute="0",
                                                   hour="4",
                                                   week_day="2",
                                                   month="JAN-NOV"),
                 target=spawner)
        aws.Rule(self,
                 "Mornings_December",
                 description="Every second minute 06-08 (CET) 1-25 decx",
                 schedule=aws_events.Schedule.cron(minute="0/2",
                                                   hour="6-7",
                                                   day="1-25",
                                                   month="DEC"),
                 target=spawner)
        aws.Rule(self,
                 "Daytime_December",
                 description="Every 20 minutes 08-15 (CET) 1-25 decx",
                 schedule=aws_events.Schedule.cron(minute="0/20",
                                                   hour="8-15",
                                                   day="1-25",
                                                   month="DEC"),
                 target=spawner)
        aws.Rule(self,
                 "Nighttime_December",
                 description="Every hour 00-6,14-24 (CET) 1-25 decx",
                 schedule=aws_events.Schedule.cron(minute="0",
                                                   hour="0-6,14-23",
                                                   day="1-25",
                                                   month="DEC"),
                 target=spawner)
        aws.Rule(self,
                 "EndOf_December",
                 description="Every hour 9-23 (CET) 25-31 decx",
                 schedule=aws_events.Schedule.cron(minute="0",
                                                   hour="9-23",
                                                   day="26-31",
                                                   month="DEC"),
                 target=spawner)
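Example #4 reads Slack credentials through a read_token_from_file helper that is not shown; a minimal sketch of what it might look like follows, with the name taken from the call sites and the behaviour assumed.

# Hypothetical helper matching the call sites above; the real project's version may differ.
def read_token_from_file(path: str) -> str:
    """Return the token stored in a local file, stripped of surrounding whitespace."""
    with open(path, encoding="utf-8") as f:
        return f.read().strip()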
Example #5
    def __init__(self, scope, id, **kwargs):
        super().__init__(scope, id, **kwargs)

        # Create random string to be used as suffix on some resource names
        resource_suffix = ''.join(
            random.choice(string.ascii_lowercase) for i in range(8))

        # Save it as SSM parameter to be used in runtime
        ssm.StringParameter(self,
                            "RESOURCE_SUFFIX",
                            string_value=resource_suffix,
                            parameter_name="RESOURCE_SUFFIX")

        # ====================================== VPC ======================================
        # Create VPC
        vpc = ec2.Vpc(self,
                      "sorterbot-vpc",
                      cidr="10.0.0.0/16",
                      enable_dns_support=True,
                      enable_dns_hostnames=True,
                      max_azs=2,
                      nat_gateways=0,
                      subnet_configuration=[
                          {
                              "subnetType": ec2.SubnetType.PUBLIC,
                              "name": "sorterbot-public-subnet-a",
                              "cidrMask": 24,
                          },
                          {
                              "subnetType": ec2.SubnetType.PUBLIC,
                              "name": "sorterbot-public-subnet-b",
                              "cidrMask": 24,
                          },
                      ])

        # Create security groups
        sg_vpc = ec2.SecurityGroup(self,
                                   "sorterbot-vpc-sg",
                                   vpc=vpc,
                                   allow_all_outbound=True,
                                   security_group_name="sorterbot-vpc-sg")
        sg_vpc.add_ingress_rule(sg_vpc, ec2.Port.all_traffic())

        sg_control = ec2.SecurityGroup(
            self,
            "sorterbot-control-sg",
            vpc=vpc,
            allow_all_outbound=True,
            security_group_name="sorterbot-control-sg")
        sg_control.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(22))
        sg_control.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(5432))
        sg_control.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(80))

        # ====================================== IAM ======================================
        cloud_role = iam.CfnRole(
            self,
            "SorterBotCloudRole",
            role_name="SorterBotCloudRole",
            assume_role_policy_document={
                "Version":
                "2012-10-17",
                "Statement": [{
                    "Sid": "",
                    "Effect": "Allow",
                    "Principal": {
                        "Service": "ecs-tasks.amazonaws.com"
                    },
                    "Action": "sts:AssumeRole"
                }]
            },
            managed_policy_arns=[
                "arn:aws:iam::aws:policy/AmazonS3FullAccess",
                "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy",
            ])

        # Create IAM policies
        iam.ManagedPolicy(self,
                          "SorterBotSecretsForECSPolicy",
                          managed_policy_name="SorterBotSecretsForECSPolicy",
                          roles=[cloud_role],
                          statements=[
                              iam.PolicyStatement(
                                  resources=["*"],
                                  actions=[
                                      "ssm:GetParameter", "ssm:GetParameters",
                                      "secretsmanager:GetSecretValue",
                                      "kms:Decrypt"
                                  ])
                          ])

        # ====================================== S3 ======================================
        # Create S3 buckets
        s3.Bucket(self,
                  f"sorterbot-{resource_suffix}",
                  bucket_name=f"sorterbot-{resource_suffix}",
                  removal_policy=core.RemovalPolicy.DESTROY)
        s3.Bucket(self,
                  f"sorterbot-weights-{resource_suffix}",
                  bucket_name=f"sorterbot-weights-{resource_suffix}",
                  removal_policy=core.RemovalPolicy.DESTROY)
        s3.Bucket(self,
                  f"sorterbot-static-{resource_suffix}",
                  bucket_name=f"sorterbot-static-{resource_suffix}",
                  removal_policy=core.RemovalPolicy.DESTROY,
                  cors=[
                      s3.CorsRule(allowed_methods=[s3.HttpMethods.GET],
                                  allowed_origins=["*"],
                                  allowed_headers=["*"])
                  ])

        # ====================================== EC2 ======================================
        # Create EC2 instance for Control Panel
        control_panel_instance = ec2.Instance(
            self,
            f"sorterbot-control-panel-{resource_suffix}",
            instance_name=
            f"sorterbot-control-panel-{resource_suffix}",  # Since deleted instances stay around for a while in terminated state, random suffix is needed to prevent errors when destroying stack # noqa: E501
            instance_type=ec2.InstanceType("t2.micro"),
            machine_image=ec2.MachineImage.latest_amazon_linux(
                generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2),
            vpc=vpc,
            key_name="sorterbot",
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            security_group=sg_control)

        control_panel_instance.add_to_role_policy(
            iam.PolicyStatement(resources=["*"],
                                actions=[
                                    "ec2:DescribeNetworkInterfaces",
                                    "ssm:GetParameter", "ecs:*", "s3:*"
                                ]))

        # ====================================== RDS ======================================
        # Declare connection details
        master_username = "******"
        master_user_password = core.SecretValue.ssm_secure("PG_PASS",
                                                           version="1")
        port = 5432

        # Create postgres database
        database = rds.DatabaseInstance(
            self,
            "sorterbot_postgres",
            allocated_storage=10,
            backup_retention=core.Duration.days(
                0
            ),  # Don't save backups since storing them is not covered by the Free Tier
            database_name="sorterbot",
            delete_automated_backups=True,
            deletion_protection=False,
            engine=rds.DatabaseInstanceEngine.POSTGRES,
            engine_version="11",
            instance_class=ec2.InstanceType("t2.micro"),  # Stay in Free Tier
            instance_identifier="sorterbot-postgres",
            master_username=master_username,
            master_user_password=master_user_password,
            port=port,
            storage_type=rds.StorageType.GP2,
            vpc=vpc,
            vpc_placement=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PUBLIC
            ),  # Make DB publicly accessible (with credentials)
            removal_policy=core.RemovalPolicy.DESTROY)

        # Add ingress rule to allow external connections
        database.connections.allow_default_port_from_any_ipv4()

        # ====================================== ECR ======================================
        # Create ECR repository for Docker images
        ecr.Repository(self,
                       "sorterbot-ecr",
                       repository_name="sorterbot-ecr",
                       removal_policy=core.RemovalPolicy.DESTROY)

        # ====================================== ECS ======================================
        # Create ECS Cluster, Task Definition and Fargate Service
        ecs_cluster = ecs.Cluster(self,
                                  "sorterbot-ecs-cluster",
                                  vpc=vpc,
                                  cluster_name="sorterbot-ecs-cluster")
        task_definition = ecs.FargateTaskDefinition(
            self, "sorterbot-fargate-service", cpu=512, memory_limit_mib=4096)
        task_definition.add_container(
            "sorterbot-cloud-container",
            image=ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample"))
        ecs.FargateService(self,
                           "sorterbot-ecs-service",
                           cluster=ecs_cluster,
                           task_definition=task_definition,
                           assign_public_ip=True,
                           service_name="sorterbot-ecs-service",
                           desired_count=0,
                           security_group=sg_vpc)

        # Save resource suffix to disk to be used when destroying
        with open(
                Path(__file__).parents[1].joinpath("scripts", "variables",
                                                   "RESOURCE_SUFFIX"),
                "w") as outfile:
            outfile.write(resource_suffix)
Example #6
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 landing_zone: IVpcLandingZone,
                 subnet_group_name: str = 'Default',
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        self.__landing_zone = landing_zone
        self.__subnet_group_name = subnet_group_name

        # Init basic resources
        self.log_group = logs.LogGroup(
            self,
            'LogGroup',
            log_group_name='/homenet/video/',
            retention=logs.RetentionDays.ONE_DAY,
            removal_policy=core.RemovalPolicy.DESTROY)

        # Add security constraints
        self.security_group = landing_zone.security_group
        self.secrets = RtspCameraSecrets(self,
                                         'Secrets',
                                         landing_zone=landing_zone)

        # Create the stateful bucket
        bucket_name = 'homenet-{}.{}.virtual.world'.format(
            'hybrid',  #landing_zone.zone_name.lower(),
            core.Stack.of(self).region)
        self.bucket = s3.Bucket(
            self,
            'Bucket',
            removal_policy=core.RemovalPolicy.RETAIN,
            bucket_name=bucket_name,
            cors=[
                s3.CorsRule(allowed_methods=[s3.HttpMethods.GET],
                            allowed_origins=['*'])
            ],
            lifecycle_rules=[
                s3.LifecycleRule(
                    id='Retain_5Years',
                    abort_incomplete_multipart_upload_after=core.Duration.days(
                        7),
                    expiration=core.Duration.days(365 * 5)),
                s3.LifecycleRule(id='Remove_CachedFiles',
                                 tag_filters={'Cached': '7d'},
                                 expiration=core.Duration.days(7))
            ])

        # Create Notification Topics for eventing
        self.frameAnalyzed = sns.Topic(
            self,
            'FrameAnalysis',
            display_name='HomeNet-{}-Rtsp-FrameAnalysis'.format(
                landing_zone.zone_name),
            topic_name='HomeNet-{}-Rtsp-FrameAnalysis'.format(
                landing_zone.zone_name))

        self.frameUploaded = sns.Topic(
            self,
            'RtspFrameUploaded',
            display_name='HomeNet-{}-Rtsp-FrameUploaded'.format(
                landing_zone.zone_name),
            topic_name='HomeNet-{}-Rtsp-FrameUploaded'.format(
                landing_zone.zone_name))

        self.bucket.add_event_notification(
            s3.EventType.OBJECT_CREATED,
            s3n.SnsDestination(topic=self.frameUploaded))

        # Setup databases
        self.time_stream = TimeStreamConstruct(self,
                                               'TimeStream',
                                               landing_zone=landing_zone)

        self.face_table = ddb.Table(
            self,
            'FaceTable',
            table_name='HomeNet-{}-FaceTable'.format(landing_zone.zone_name),
            partition_key=ddb.Attribute(name='PartitionKey',
                                        type=ddb.AttributeType.STRING),
            sort_key=ddb.Attribute(name='SortKey',
                                   type=ddb.AttributeType.STRING),
            billing_mode=ddb.BillingMode.PAY_PER_REQUEST,
            point_in_time_recovery=True)
Example #7
    def __init__(self, scope: core.Construct, construct_id: str,
                 processing_bucket_name, processing_bucket_upload_prefix,
                 processing_bucket_output_prefix) -> None:
        super().__init__(scope, construct_id)

        processing_bucket = aws_s3.Bucket(
            self,
            'processing_bucket',
            bucket_name=processing_bucket_name,
            removal_policy=core.RemovalPolicy.DESTROY,
            cors=[
                aws_s3.CorsRule(allowed_headers=["*"],
                                allowed_methods=[aws_s3.HttpMethods.PUT],
                                allowed_origins=["*"])
            ])
        images_table_name = "images"
        ddb_images_table = aws_dynamodb.Table(
            self,
            "ddb_images_table",
            table_name=images_table_name,
            partition_key={
                "name": "id",
                "type": aws_dynamodb.AttributeType.STRING
            },
            removal_policy=core.RemovalPolicy.DESTROY  # NOT recommended for production code
        )

        # this lambda will process images once they arrive in s3
        lambda_name = 'image-pipeline-image-processor'
        image_processing_lambda = aws_lambda.Function(
            self,
            lambda_name,
            function_name=lambda_name,
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            code=aws_lambda.Code.asset('lambda_functions/image_processor'),
            handler='image_processor.handler',
            layers=[
                aws_lambda_python.PythonLayerVersion(
                    self,
                    "lambda_layers",
                    entry="lambda_layer",
                    compatible_runtimes=[aws_lambda.Runtime.PYTHON_3_8])
            ],
            timeout=core.Duration.minutes(3),
            retry_attempts=0,
            environment={
                "OUTPUT_BUCKET": processing_bucket_name,
                "OUTPUT_PREFIX": processing_bucket_output_prefix,
                "IMAGE_TABLE_NAME": images_table_name
            })
        # set up lambda to trigger from s3 upload
        lambda_notification = aws_s3_notifications.LambdaDestination(
            image_processing_lambda)
        processing_bucket.add_event_notification(
            aws_s3.EventType.OBJECT_CREATED, lambda_notification,
            aws_s3.NotificationKeyFilter(
                prefix=processing_bucket_upload_prefix))

        # will need to read the raw image and write processed images
        processing_bucket.grant_read_write(image_processing_lambda)
        ddb_images_table.grant_read_write_data(image_processing_lambda)

        # return this so the uploading stack can use them
        self.processing_bucket = processing_bucket
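The image_processor Lambda itself is not part of this example. Purely as a hypothetical sketch of the contract implied by the S3 trigger and the OUTPUT_BUCKET / OUTPUT_PREFIX / IMAGE_TABLE_NAME environment variables (the processing step is a placeholder):

# Hypothetical sketch of lambda_functions/image_processor/image_processor.py (assumption).
import os
import uuid
import boto3

s3_client = boto3.client("s3")
table = boto3.resource("dynamodb").Table(os.environ["IMAGE_TABLE_NAME"])

def handler(event, context):
    # One invocation can carry several S3 records; keys with special characters
    # would additionally need urllib.parse.unquote_plus().
    for record in event["Records"]:
        bucket = record["s3"]["bucket"]["name"]
        key = record["s3"]["object"]["key"]
        # Placeholder "processing": copy the object under the output prefix.
        output_key = f"{os.environ['OUTPUT_PREFIX'].rstrip('/')}/{key.rsplit('/', 1)[-1]}"
        s3_client.copy_object(Bucket=os.environ["OUTPUT_BUCKET"],
                              CopySource={"Bucket": bucket, "Key": key},
                              Key=output_key)
        table.put_item(Item={"id": str(uuid.uuid4()),
                             "source_key": key,
                             "output_key": output_key})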
Example #8
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 *,
                 repo_name: str = None,
                 lambda_code_etl: lambda_.CfnParametersCode = None,
                 lambda_code_serve: lambda_.CfnParametersCode = None,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        code = codecommit.Repository.from_repository_name(
            self, "ImportedRepo", repo_name)

        build_pipeline = codebuild.PipelineProject(
            self,
            "BuildPipeline",
            build_spec=codebuild.BuildSpec.from_object(
                dict(version="0.2",
                     phases=dict(
                         install=dict(commands=[
                             "npm install aws-cdk", "npm update",
                             "python -m pip install -r requirements.txt"
                         ]),
                         build=dict(commands=["npx cdk synth -o dist"])),
                     artifacts={
                         "base-directory": "dist",
                         "files": ["InfraStack.template.json"]
                     },
                     environment=dict(
                         buildImage=codebuild.LinuxBuildImage.STANDARD_2_0))))

        build_infra = codebuild.PipelineProject(
            self,
            'BuildInfra',
            build_spec=codebuild.BuildSpec.from_object(
                dict(version="0.2",
                     phases=dict(install=dict(commands=[
                         "python -m pip install -r requirements.txt",
                         "python lambda/test_etl.py"
                     ]), ),
                     artifacts={
                         "base-directory":
                         "lambda",
                         "files": [
                             "etl_module.py", "lambda-handler-etl.py",
                             "lambda-handler-serve.py"
                         ]
                     },
                     environment=dict(
                         buildImage=codebuild.LinuxBuildImage.STANDARD_2_0))))

        build_website = codebuild.PipelineProject(
            self,
            'PackageWebsite',
            build_spec=codebuild.BuildSpec.from_object(
                dict(version="0.2",
                     phases=dict(install=dict(commands=[""]), ),
                     artifacts={
                         "base-directory": "website",
                         "files": ["*"]
                     },
                     environment=dict(
                         buildImage=codebuild.LinuxBuildImage.STANDARD_2_0))))

        source_output = codepipeline.Artifact()
        build_pipeline_output = codepipeline.Artifact("BuildPipelineOutput")
        build_infra_output = codepipeline.Artifact("BuildInfraOutput")
        build_website_output = codepipeline.Artifact("BuildWebsiteOutput")

        infra_location = build_infra_output.s3_location

        params = lambda_code_etl.assign(
            bucket_name=infra_location.bucket_name,
            object_key=infra_location.object_key,
            object_version=infra_location.object_version)

        params.update(
            lambda_code_serve.assign(
                bucket_name=infra_location.bucket_name,
                object_key=infra_location.object_key,
                object_version=infra_location.object_version))

        # make an S3 bucket to use to host static files
        website_bucket = s3.Bucket(
            self,
            id + "_s3-bucket",
            bucket_name=('cdk-s3-static-website-blog-pb-2'),
            public_read_access=True,
            removal_policy=core.RemovalPolicy.DESTROY,
            website_index_document="dashboard.html",
            website_error_document='error.html',
            cors=[
                s3.CorsRule(allowed_methods=[s3.HttpMethods.GET],
                            allowed_origins=['*'])
            ])

        codepipeline.Pipeline(
            self,
            "Pipeline",
            stages=[
                codepipeline.StageProps(
                    stage_name="Source",
                    actions=[
                        codepipeline_actions.CodeCommitSourceAction(
                            action_name="CodeCommit_Source",
                            repository=code,
                            output=source_output)
                    ]),
                codepipeline.StageProps(
                    stage_name="Build",
                    actions=[
                        codepipeline_actions.CodeBuildAction(
                            action_name="Lambda_Build",
                            project=build_infra,
                            input=source_output,
                            outputs=[build_infra_output]),
                        codepipeline_actions.CodeBuildAction(
                            action_name="CDK_Build",
                            project=build_pipeline,
                            input=source_output,
                            outputs=[build_pipeline_output]),
                        codepipeline_actions.CodeBuildAction(
                            action_name="Website_Build",
                            project=build_website,
                            input=source_output,
                            outputs=[build_website_output])
                    ]),
                codepipeline.StageProps(
                    stage_name="Deploy",
                    actions=[
                        codepipeline_actions.CloudFormationCreateUpdateStackAction(
                            action_name="Infra_CFN_Deploy",
                            template_path=build_pipeline_output.at_path(
                                "InfraStack.template.json"),
                            stack_name="InfraDeploymentStack",
                            admin_permissions=True,
                            parameter_overrides=params,
                            extra_inputs=[build_infra_output]),
                        codepipeline_actions.S3DeployAction(
                            action_name='S3_Deploy',
                            bucket=website_bucket,
                            input=build_website_output,
                        )
                    ])
            ])
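In this pattern, lambda_code_etl and lambda_code_serve are CfnParametersCode objects whose bucket/key parameters the Deploy stage fills in through parameter_overrides. A hedged sketch of how the app might wire this pipeline stack together with the Lambda stack; the PipelineStack and pipeline_stack names are placeholders, not taken from the example.

# Hypothetical app wiring (CDK v1); module and class names are placeholders.
from aws_cdk import core, aws_lambda as lambda_
from pipeline_stack import PipelineStack  # placeholder import for the stack shown above

app = core.App()
lambda_code_etl = lambda_.Code.from_cfn_parameters()
lambda_code_serve = lambda_.Code.from_cfn_parameters()
# A separate InfraStack (not shown) would build its Functions with code=lambda_code_etl / code=lambda_code_serve.
PipelineStack(app, "PipelineStack",
              repo_name="my-repo",
              lambda_code_etl=lambda_code_etl,
              lambda_code_serve=lambda_code_serve)
app.synth()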