def __init__(self, scope: cdk.Construct, construct_id: str,
                 **kwargs) -> None:
        """Create an S3 bucket with public read for HTML objects and an
        HTTPS-only deny rule for all other object access.
        """
        super().__init__(scope, construct_id, **kwargs)
        """ Create S3 Bucket: """
        # Versioned bucket; DESTROY removes the bucket on stack teardown
        # (S3 refuses the delete while the bucket still holds objects).
        konstone_bkt = _s3.Bucket(
            self,
            "konstoneAssets",
            bucket_name="luber-testing-bucket-policy-via-cdk",
            versioned=True,
            removal_policy=cdk.RemovalPolicy.DESTROY)

        # Add bucket resource policy:

        # Allow any principal to read HTML objects only.
        konstone_bkt.add_to_resource_policy(
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW,
                actions=["s3:GetObject"],
                resources=[konstone_bkt.arn_for_objects("*.html")],
                principals=[_iam.AnyPrincipal()]))

        # Deny every S3 action on objects when the request is not HTTPS.
        konstone_bkt.add_to_resource_policy(
            _iam.PolicyStatement(
                effect=_iam.Effect.DENY,
                actions=["s3:*"],
                resources=[f"{konstone_bkt.bucket_arn}/*"],
                principals=[_iam.AnyPrincipal()],
                conditions={"Bool": {
                    "aws:SecureTransport": False
                }}))
Exemplo n.º 2
0
 def _create_artifact_bucket(self, bucket_name: str):
     """Provision the KMS-encrypted pipeline artifact bucket.

     Attaches two deny statements: one rejecting any request made over
     plain HTTP, and one rejecting uploads that do not use KMS
     server-side encryption.  Returns the bucket construct.
     """
     bucket = aws_s3.Bucket(
         self,
         "PipelineAssets",
         bucket_name=bucket_name,
         encryption=aws_s3.BucketEncryption.KMS_MANAGED,
         removal_policy=core.RemovalPolicy.DESTROY)
     # Both statements cover the bucket itself and every object in it.
     bucket_and_objects = [
         bucket.bucket_arn,
         f"{bucket.bucket_arn}/*",
     ]
     # Deny all S3 actions over non-TLS transport.
     deny_insecure_transport = aws_iam.PolicyStatement(
         principals=[aws_iam.AnyPrincipal()],
         effect=aws_iam.Effect.DENY,
         resources=bucket_and_objects,
         actions=["s3:*"],
         conditions={"Bool": {"aws:SecureTransport": "false"}})
     # Deny uploads that are not KMS-encrypted at rest.
     deny_unencrypted_upload = aws_iam.PolicyStatement(
         principals=[aws_iam.AnyPrincipal()],
         effect=aws_iam.Effect.DENY,
         resources=bucket_and_objects,
         actions=["s3:PutObject"],
         conditions={
             "StringNotEquals": {
                 "s3:x-amz-server-side-encryption": "aws:kms"
             }
         })
     bucket.add_to_resource_policy(permission=deny_insecure_transport)
     bucket.add_to_resource_policy(permission=deny_unencrypted_upload)
     return bucket
Exemplo n.º 3
0
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        """Stack with a versioned bucket: public read for ``*.txt``
        objects and a deny for any non-TLS object access."""
        super().__init__(scope, construct_id, **kwargs)

        # Versioned bucket, removed when the stack is destroyed.
        bucket = aws_s3.Bucket(
            self,
            "cdkBucket01",
            versioned=True,
            removal_policy=core.RemovalPolicy.DESTROY)

        # Public read access, limited to text files.
        allow_txt_read = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=["s3:GetObject"],
            resources=[bucket.arn_for_objects("*.txt")],
            principals=[aws_iam.AnyPrincipal()])
        bucket.add_to_resource_policy(allow_txt_read)

        # Block every object request that is not made over TLS.
        deny_insecure = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.DENY,
            actions=["s3:*"],
            resources=[f"{bucket.bucket_arn}/*"],
            principals=[aws_iam.AnyPrincipal()],
            conditions={"Bool": {"aws:SecureTransport": False}})
        bucket.add_to_resource_policy(deny_insecure)
Exemplo n.º 4
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Versioned bucket with public read for HTML files and an
        HTTPS-only deny rule for everything else."""
        super().__init__(scope, id, **kwargs)

        bucket = _s3.Bucket(
            self,
            "mybucketId",
            bucket_name="s04-resource-policy111",
            versioned=True,
            removal_policy=core.RemovalPolicy.DESTROY,
        )

        # add bucket resource policy
        # Anyone may read HTML objects.
        allow_html = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            actions=["s3:GetObject"],
            resources=[bucket.arn_for_objects("*.html")],
            principals=[_iam.AnyPrincipal()],
        )
        bucket.add_to_resource_policy(allow_html)

        # All other object access must use HTTPS.
        deny_http = _iam.PolicyStatement(
            effect=_iam.Effect.DENY,
            actions=["s3:*"],
            resources=[f"{bucket.bucket_arn}/*"],
            principals=[_iam.AnyPrincipal()],
            conditions={"Bool": {"aws:SecureTransport": False}},
        )
        bucket.add_to_resource_policy(deny_http)
Exemplo n.º 5
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Static-site stack: public S3 bucket fronted by CloudFront.

        403/404 responses are rewritten to ``/index.html`` (status 200)
        so client-side routing keeps working.
        """
        super().__init__(scope, id, **kwargs)

        site_bucket = s3.Bucket(self, 'DnaFront', versioned=True,)
        # Anyone may fetch any object from the bucket.
        site_bucket.add_to_resource_policy(iam.PolicyStatement(
            actions=["s3:GetObject"],
            resources=[site_bucket.arn_for_objects("*")],
            principals=[iam.AnyPrincipal()],
        ))

        # SPA fallback: map 403 and 404 to index.html, short cache TTL.
        spa_fallbacks = [
            cfn.CfnDistribution.CustomErrorResponseProperty(
                error_code=code,
                response_code=200,
                error_caching_min_ttl=5,
                response_page_path='/index.html')
            for code in (403, 404)
        ]
        origin = cfn.SourceConfiguration(
            s3_origin_source=cfn.S3OriginConfig(
                s3_bucket_source=site_bucket
            ),
            behaviors=[cfn.Behavior(is_default_behavior=True)]
        )
        distribution = cfn.CloudFrontWebDistribution(
            self, "DnaFrontEndDistributor",
            origin_configs=[origin],
            default_root_object='index.html',
            viewer_protocol_policy=cfn.ViewerProtocolPolicy.REDIRECT_TO_HTTPS,
            error_configurations=spa_fallbacks,
        )
Exemplo n.º 6
0
    def _setup_elasticsearch_7_10_fgac(self) -> None:
        """Provision an Elasticsearch 7.10 domain with fine-grained
        access control (FGAC) and export its endpoint as a CfnOutput.

        Reads instance state: ``self.region``, ``self.account``,
        ``self.username``, ``self.password_secret``, ``self.key``.
        """
        domain_name = "wrangler-es-7-10-fgac"
        validate_domain_name(domain_name)
        # ARN is assembled by hand so the access policy can reference it
        # before the Domain construct exists.
        domain_arn = f"arn:aws:es:{self.region}:{self.account}:domain/{domain_name}"
        domain = opensearch.Domain(
            self,
            domain_name,
            domain_name=domain_name,
            version=opensearch.EngineVersion.ELASTICSEARCH_7_10,
            capacity=opensearch.CapacityConfig(
                data_node_instance_type="t3.small.search", data_nodes=1),
            # Open resource policy; actual authorization is delegated to
            # fine-grained access control configured below.
            access_policies=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=["es:*"],
                    principals=[iam.AnyPrincipal()],  # FGACs
                    resources=[f"{domain_arn}/*"],
                )
            ],
            fine_grained_access_control=opensearch.AdvancedSecurityOptions(
                master_user_name=self.username,
                master_user_password=self.password_secret,
            ),
            node_to_node_encryption=True,
            encryption_at_rest=opensearch.EncryptionAtRestOptions(
                enabled=True, kms_key=self.key),
            enforce_https=True,
            removal_policy=RemovalPolicy.DESTROY,
        )

        CfnOutput(self,
                  f"DomainEndpoint-{domain_name}",
                  value=domain.domain_endpoint)
    def secure_bucket(self, name, suppressions=None, **kwargs):
        """Create a locked-down bucket and return it.

        The bucket is retained on stack deletion, uses S3-managed
        encryption, blocks all public access, denies non-HTTPS object
        access, and has its CloudFormation logical id pinned to *name*.
        Optional cfn-nag *suppressions* are attached when given.
        """
        bucket = Bucket(self,
                        name,
                        removal_policy=RemovalPolicy.RETAIN,
                        encryption=BucketEncryption.S3_MANAGED,
                        block_public_access=BlockPublicAccess.BLOCK_ALL,
                        **kwargs)
        # Deny every object-level action made over plain HTTP.
        https_only = iam.PolicyStatement(
            sid="HttpsOnly",
            resources=[
                bucket.arn_for_objects("*"),
            ],
            actions=["*"],
            effect=iam.Effect.DENY,
            principals=[iam.AnyPrincipal()],
            conditions={"Bool": {
                "aws:SecureTransport": False
            }},
        )
        bucket.add_to_resource_policy(https_only)
        # Pin the logical id so refactors don't replace the bucket.
        cfn_bucket = bucket.node.default_child  # type: CfnResource
        cfn_bucket.override_logical_id(name)
        if suppressions:
            add_cfn_nag_suppressions(cfn_bucket, suppressions)

        return bucket
Exemplo n.º 8
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Static website stack: public-read bucket with website hosting
        plus a deployment of the local ``html`` directory."""
        super().__init__(scope, id, **kwargs)

        self.wwwBucket = s3.Bucket(
            self,
            "HtmlBucket",
            access_control=s3.BucketAccessControl.PUBLIC_READ,
            bucket_name="music-suite-www-dev",
            website_index_document="index.html",
            website_error_document="404.html")

        # Upload the local ./html directory into the bucket.
        s3_deployment.BucketDeployment(
            self,
            "HtmlDeployment",
            destination_bucket=self.wwwBucket,
            sources=[s3_deployment.Source.asset("html")])

        # Anonymous GetObject on every object in the bucket.
        self.publicReadStatement = iam.PolicyStatement(
            actions=["s3:GetObject"],
            resources=["%s/*" % self.wwwBucket.bucket_arn],
            principals=[iam.AnyPrincipal()])

        self.wwwBucket.add_to_resource_policy(self.publicReadStatement)
Exemplo n.º 9
0
    def __init__(self, app: core.App, id: str, **kwargs) -> None:
        """Deploy the frontend to an S3 bucket.

        Sets up an S3 bucket with public access and static website
        hosting, a bucket policy allowing anonymous GetObject calls,
        and a deployment of ``./src`` into the bucket.  Exports the
        website domain name as a CfnOutput.
        """
        super().__init__(app, id, **kwargs)

        site_bucket = _s3.Bucket(self,
                                 "guestbook",
                                 public_read_access=True,
                                 website_index_document="index.html")

        # Anonymous read access to every object in the bucket.
        allow_get = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            actions=["s3:GetObject"],
            principals=[_iam.AnyPrincipal()],
            resources=[site_bucket.bucket_arn + "/*"])
        site_bucket.add_to_resource_policy(allow_get)

        # Ship the local ./src directory to the bucket.
        _s3_deploy.BucketDeployment(self,
                                    "DeployWebsite",
                                    sources=[_s3_deploy.Source.asset("./src")],
                                    destination_bucket=site_bucket)

        core.CfnOutput(self,
                       'BucketDomainName',
                       value=site_bucket.bucket_website_domain_name)
Exemplo n.º 10
0
    def __init__(self, scope: core.Construct, id: str, config: Dict,
                 vpc: ec2.Vpc, es_sg: ec2.SecurityGroup) -> None:
        """Create an Elasticsearch domain inside *vpc* driven by *config*.

        Reads ``config['data']['elasticsearch']`` for domain settings and
        ``config['awsRegion']`` / ``config['awsAccount']`` for the ARN in
        the access policy.  *es_sg* is attached to the domain's VPC
        endpoints.
        """
        super().__init__(scope, id)

        es_config = config['data']['elasticsearch']

        # Build ES domain construct parameter
        capacity_config = es.CapacityConfig(
            master_node_instance_type=es_config['capacity']['masterNodes']
            ['instanceType'],
            master_nodes=es_config['capacity']['masterNodes']['count'],
            data_node_instance_type=es_config['capacity']['dataNodes']
            ['instanceType'],
            data_nodes=es_config['capacity']['dataNodes']['count'],
        )

        # Place the domain in the configured subnet group.
        vpc_options = es.VpcOptions(
            security_groups=[es_sg],
            subnets=vpc.select_subnets(
                subnet_group_name=es_config['subnetGroupName']).subnets,
        )

        ebs_options = es.EbsOptions(volume_size=es_config['ebs']['volumeSize'])

        zone_awareness = es.ZoneAwarenessConfig(
            availability_zone_count=es_config['zoneAwareness']['count'],
            enabled=es_config['zoneAwareness']['enabled'],
        )

        # NOTE(review): 'slowIearchLogEnabled' looks like a typo for
        # 'slowSearchLogEnabled' — confirm against the config schema
        # before renaming; the config file may use this exact key.
        logging_options = es.LoggingOptions(
            app_log_enabled=es_config['logging']['appLogEnabled'],
            audit_log_enabled=es_config['logging']['auditLogEnabled'],
            slow_index_log_enabled=es_config['logging']['slowIndexLogEnabled'],
            slow_search_log_enabled=es_config['logging']
            ['slowIearchLogEnabled'])

        # Open access policy scoped to this domain's ARN; network access
        # is still restricted by VPC placement and the security group.
        access_policy = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            principals=[iam.AnyPrincipal()],
            actions=['es:*'],
            resources=[
                "arn:aws:es:" + config['awsRegion'] + ":" +
                config['awsAccount'] + ":domain/" + es_config['domainName'] +
                "/*"
            ])

        # Create ES domain
        es.Domain(
            self,
            'Domain',
            domain_name=es_config['domainName'],
            version=es.ElasticsearchVersion.of(es_config['version']),
            capacity=capacity_config,
            ebs=ebs_options,
            zone_awareness=zone_awareness,
            vpc_options=vpc_options,
            logging=logging_options,
            access_policies=[access_policy],
        )
Exemplo n.º 11
0
    def __init__(self,
                 scope: cdk.Construct,
                 construct_id: str,
                 stack_log_level: str,
                 custom_bkt_name: str = None,
                 **kwargs) -> None:
        """Create the datasource bucket and its stack outputs.

        :param stack_log_level: log level for the stack (kept for
            interface parity with sibling stacks).
        :param custom_bkt_name: optional explicit bucket name, applied
            via an L1 property override when given.
        """
        super().__init__(scope, construct_id, **kwargs)

        self.data_bkt = _s3.Bucket(
            self,
            "dataBucket",
            # auto_delete_objects=True,
            # removal_policy=cdk.RemovalPolicy.DESTROY,
            # bucket_name="new-app-bucket-example",
        )

        # Lets set custom bucket name if it is set
        if custom_bkt_name:
            cfn_data_bkt = self.data_bkt.node.default_child
            cfn_data_bkt.add_override("Properties.BucketName", custom_bkt_name)

        # Delegate access control to S3 Access Points owned by this
        # account: any action is allowed when the request arrives
        # through one of this account's access points.
        self.data_bkt.add_to_resource_policy(
            _iam.PolicyStatement(actions=["*"],
                                 principals=[_iam.AnyPrincipal()],
                                 resources=[
                                     f"{self.data_bkt.bucket_arn}",
                                     f"{self.data_bkt.arn_for_objects('*')}"
                                 ],
                                 conditions={
                                     "StringEquals": {
                                         "s3:DataAccessPointAccount":
                                         f"{cdk.Aws.ACCOUNT_ID}"
                                     }
                                 }))

        ###########################################
        ################# OUTPUTS #################
        ###########################################
        output_0 = cdk.CfnOutput(
            self,
            "AutomationFrom",
            value=f"{GlobalArgs.SOURCE_INFO}",
            description=
            "To know more about this automation stack, check out our github page."
        )
        output_1 = cdk.CfnOutput(self,
                                 "dataSourceBucket",
                                 value=f"{self.data_bkt.bucket_name}",
                                 description="The datasource bucket name")
        # Fix: this output carries the console URL, so describe it as
        # such (the original description said "bucket name").
        output_2 = cdk.CfnOutput(
            self,
            "dataSourceBucketUrl",
            value=
            f"https://console.aws.amazon.com/s3/buckets/{self.data_bkt.bucket_name}",
            description="The datasource bucket url")
Exemplo n.º 12
0
 def create_all_topics(self) -> None:
     """
     Create all stack topics.

     Populates ``self.topics_`` with:

     - ``alarm_topic``: internal alarm topic with an operator e-mail
       subscription.
     - ``stac_item_topic``: public topic for new STAC items; its topic
       policy lets any principal subscribe/receive while the owning
       account keeps full management permissions.
     - ``reconcile_stac_item_topic``: internal reconciliation topic.
     """
     # Internal topics
     # General alarm topic to signal problems in stack execution
     # and e-mail subscription
     self.topics_["alarm_topic"] = sns.Topic(self, "alarm_topic")
     self.topics_["alarm_topic"].add_subscription(
         sns_subscriptions.EmailSubscription(settings.operator_email))
     # Public STAC item topic for new STAC items
     self.topics_["stac_item_topic"] = sns.Topic(self, "stac_item_topic")
     core.CfnOutput(
         self,
         "stac_item_topic_output",
         value=self.topics_["stac_item_topic"].topic_arn,
         description="STAC item topic",
     )
     # Public half of the topic policy: anyone may subscribe/receive.
     sit_policy = iam.PolicyDocument(
         assign_sids=True,
         statements=[
             iam.PolicyStatement(
                 actions=["SNS:Subscribe", "SNS:Receive"],
                 principals=[iam.AnyPrincipal()],
                 resources=[self.topics_["stac_item_topic"].topic_arn],
             )
         ],
     )
     # Owner half: full management rights for this account only.
     sit_policy.add_statements(
         iam.PolicyStatement(
             actions=[
                 "SNS:GetTopicAttributes",
                 "SNS:SetTopicAttributes",
                 "SNS:AddPermission",
                 "SNS:RemovePermission",
                 "SNS:DeleteTopic",
                 "SNS:Subscribe",
                 "SNS:ListSubscriptionsByTopic",
                 "SNS:Publish",
                 "SNS:Receive",
             ],
             principals=[iam.AccountPrincipal(self.account)],
             resources=[self.topics_["stac_item_topic"].topic_arn],
         ))
     # We could add the document directly to stac_item_policy
     sns.TopicPolicy(
         self,
         "sns_public_topic_policy",
         topics=[self.topics_["stac_item_topic"]],
         policy_document=sit_policy,
     )
     # Reconcile topic, used internally for reconciliation operations
     self.topics_["reconcile_stac_item_topic"] = sns.Topic(
         self, "reconcile_stac_item_topic")
Exemplo n.º 13
0
def apply_secure_bucket_policy(bucket):
    """Attach an HTTPS-only policy to *bucket*.

    Adds a deny statement (sid ``HttpsOnly``) rejecting every action on
    the bucket's objects when the request is made without TLS.
    """
    https_only = iam.PolicyStatement(
        sid="HttpsOnly",
        effect=iam.Effect.DENY,
        actions=["*"],
        resources=[f"{bucket.bucket_arn}/*"],
        principals=[iam.AnyPrincipal()],
        conditions={"Bool": {"aws:SecureTransport": "false"}},
    )
    bucket.add_to_resource_policy(https_only)
Exemplo n.º 14
0
    def __init__(self, scope: core.Construct, id: str, ** kwargs) -> None:
        """Bucket with public read for HTML objects and an HTTPS-only
        deny rule for everything else."""
        super().__init__(scope, id, **kwargs)

        # Versioned bucket; DESTROY only succeeds once the bucket is
        # empty, so contents must be deleted manually before teardown.
        assets_bucket = _s3.Bucket(self,
                                   "konstoneAssets",
                                   versioned=True,
                                   removal_policy=core.RemovalPolicy.DESTROY)

        # Public read access restricted to HTML objects.
        allow_html_read = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            actions=["s3:GetObject"],
            resources=[assets_bucket.arn_for_objects("*.html")],
            principals=[_iam.AnyPrincipal()])
        assets_bucket.add_to_resource_policy(allow_html_read)

        # Deny every S3 action on objects when TLS is not used, so only
        # HTTPS requests succeed.
        deny_http = _iam.PolicyStatement(
            effect=_iam.Effect.DENY,
            actions=["s3:*"],
            resources=[f"{assets_bucket.bucket_arn}/*"],
            principals=[_iam.AnyPrincipal()],
            conditions={"Bool": {"aws:SecureTransport": False}})
        assets_bucket.add_to_resource_policy(deny_http)
Exemplo n.º 15
0
    def __init__(self, app: core.App, cfn_name: str, stack_env):
        """API Gateway + Lambda stack restricted to whitelisted IPs.

        :param cfn_name: base name used for all construct ids.
        :param stack_env: deployment stage name (also the API stage).
        """
        super().__init__(scope=app, id=f"{cfn_name}-{stack_env}")

        # lambda
        lambda_function = lambda_.Function(
            scope=self,
            id=f"{cfn_name}-lambda-task",
            code=lambda_.AssetCode.from_asset("lambda_script"),
            handler="lambda_handler.lambda_task",
            timeout=core.Duration.seconds(10),
            runtime=self.LAMBDA_PYTHON_RUNTIME,
            memory_size=128)

        # resource policy: only these CIDRs may invoke the API.
        # Fix: "127.0.0./32" was a malformed CIDR (missing last octet).
        whitelisted_ips = ["127.0.0.1/32"]
        api_resource_policy = iam.PolicyDocument(statements=[
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=["execute-api:Invoke"],
                principals=[iam.AnyPrincipal()],
                resources=["execute-api:/*/*/*"],
                conditions={"IpAddress": {
                    "aws:SourceIp": whitelisted_ips
                }})
        ])

        # api_gateway
        base_api = apigw_.RestApi(
            scope=self,
            id=f"{cfn_name}-{stack_env}-apigw",
            rest_api_name=f"{cfn_name}-{stack_env}-apigw",
            deploy_options=apigw_.StageOptions(stage_name=stack_env),
            policy=api_resource_policy)

        # POST /task -> lambda
        api_entity = base_api.root.add_resource("task")
        api_entity_lambda = apigw_.LambdaIntegration(
            handler=lambda_function,
            integration_responses=[
                apigw_.IntegrationResponse(status_code="200")
            ])

        api_entity.add_method(http_method="POST",
                              integration=api_entity_lambda)
Exemplo n.º 16
0
    def _create_site_bucket(self):
        """Create the public S3 bucket for the static site construct.

        Read access is limited to requests carrying the expected
        ``Referer`` header (traffic routed through the CDN origin).
        """
        self.bucket = s3.Bucket(
            self,
            "site_bucket",
            bucket_name=self._site_domain_name,
            website_index_document="index.html",
            website_error_document="404.html",
            removal_policy=RemovalPolicy.DESTROY,
            auto_delete_objects=True,
        )
        # Public GetObject, gated below on the secret Referer header.
        read_policy = iam.PolicyStatement(
            actions=["s3:GetObject"],
            resources=[self.bucket.arn_for_objects("*")],
            principals=[iam.AnyPrincipal()],
        )
        read_policy.add_condition(
            "StringEquals",
            {"aws:Referer": self.__origin_referer_header},
        )
        self.bucket.add_to_resource_policy(read_policy)
Exemplo n.º 17
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """ECR repository stack for the "payment-digestor" image.

        Keeps only the latest image and grants: pull access to ECS
        service principals, push+pull to this account, and ListImages
        to any principal.
        """
        super().__init__(scope, id, **kwargs)

        flink_app_repo = ecr.Repository(self,
                                        id,
                                        removal_policy=core.RemovalPolicy.DESTROY,
                                        repository_name="payment-digestor"
                                        )

        # Retain only the most recent image to bound storage.
        flink_app_repo.add_lifecycle_rule(max_image_count=1)

        # Pull-only permissions for the ECS and ECS-tasks services.
        flink_app_repo.add_to_resource_policy(iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=["ecr:GetDownloadUrlForLayer",
                     "ecr:BatchGetImage",
                     "ecr:BatchCheckLayerAvailability",
                     "ecr:GetAuthorizationToken"],
            principals=[iam.ServicePrincipal(service="ecs.amazonaws.com"),
                        iam.ServicePrincipal(service="ecs-tasks.amazonaws.com")],
        ))

        # Push + pull for identities in this account.
        flink_app_repo.add_to_resource_policy(iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=["ecr:GetDownloadUrlForLayer",
                     "ecr:BatchGetImage",
                     "ecr:BatchCheckLayerAvailability",
                     "ecr:PutImage",
                     "ecr:InitiateLayerUpload",
                     "ecr:UploadLayerPart",
                     "ecr:CompleteLayerUpload"],
            principals=[iam.AccountPrincipal(account_id=self.account)]
        ))

        # NOTE(review): ListImages is granted to *any* principal —
        # confirm the repository listing is meant to be public.
        flink_app_repo.add_to_resource_policy(iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=["ecr:ListImages"],
            principals=[iam.AnyPrincipal()]
        ))
Exemplo n.º 18
0
    def __init__(self, scope: core.Construct, id: str,
                 target_cluster: eks.Cluster, vpc_id: str,
                 vpc_subnets: [ec2.ISubnet], **kwargs) -> None:
        """Add a managed node group to *target_cluster* and create an
        ECR repository reachable only from the cluster VPC's CIDR.

        :param vpc_id: id of the VPC the cluster runs in (looked up).
        :param vpc_subnets: subnets the node group is placed in.
        """
        super().__init__(scope, id, **kwargs)

        cluster_vpc = ec2.Vpc.from_lookup(self, 'VPC', vpc_id=vpc_id)
        node_group_id = "ng-" + id
        # Fixed-size group of one t3.medium node (min = max = 1).
        eks.Nodegroup(self,
                      id=node_group_id,
                      cluster=target_cluster,
                      ami_type=eks.NodegroupAmiType.AL2_X86_64,
                      desired_size=1,
                      disk_size=20,
                      force_update=False,
                      instance_type=ec2.InstanceType.of(
                          ec2.InstanceClass.BURSTABLE3,
                          ec2.InstanceSize.MEDIUM),
                      max_size=1,
                      min_size=1,
                      nodegroup_name=node_group_id,
                      subnets=ec2.SubnetSelection(subnets=vpc_subnets))

        repository = ecr.Repository(self,
                                    id=id,
                                    image_scan_on_push=False,
                                    repository_name=id)

        # Allow all ECR actions, but only for requests originating from
        # the cluster VPC's CIDR block.  Plain dict literals replace the
        # redundant dict({...}) wrappers of the original.
        repository.add_to_resource_policy(
            iam.PolicyStatement(
                sid="IPAllow",
                effect=iam.Effect.ALLOW,
                principals=[iam.AnyPrincipal()],
                actions=["ecr:*"],
                conditions={
                    "IpAddress": {"aws:SourceIp": cluster_vpc.vpc_cidr_block}
                }))
Exemplo n.º 19
0
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        vpc,
        stack_log_level: str,
        back_end_api_name: str,
        **kwargs
    ) -> None:
        """Private API Gateway fronting a greeter Lambda.

        Builds: the Lambda (inline code read from disk) with an alias
        and log group, an interface VPC endpoint for API Gateway, a
        resource policy that denies all traffic not arriving through
        that endpoint, and a PRIVATE RestApi exposing GET
        /secure/greeter.

        :param vpc: VPC hosting the interface endpoint and callers.
        :param stack_log_level: passed to the Lambda as LOG_LEVEL.
        :param back_end_api_name: name given to the RestApi.
        :raises OSError: if the Lambda source file cannot be read.
        """
        super().__init__(scope, id, **kwargs)

        # Create Serverless Event Processor using Lambda):
        # Read Lambda Code):
        try:
            with open("secure_private_api/stacks/back_end/lambda_src/serverless_greeter.py", mode="r") as f:
                greeter_fn_code = f.read()
        except OSError as e:
            print("Unable to read Lambda Function Code")
            raise e

        greeter_fn = _lambda.Function(
            self,
            "getSquareFn",
            function_name=f"greeter_fn_{id}",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="index.lambda_handler",
            code=_lambda.InlineCode(greeter_fn_code),
            timeout=core.Duration.seconds(15),
            reserved_concurrent_executions=1,
            environment={
                "LOG_LEVEL": f"{stack_log_level}",
                "Environment": "Production",
                "ANDON_CORD_PULLED": "False"
            }
        )
        # Alias pinned to the latest version for stable invocation.
        greeter_fn_version = greeter_fn.latest_version
        greeter_fn_version_alias = _lambda.Alias(
            self,
            "greeterFnAlias",
            alias_name="MystiqueAutomation",
            version=greeter_fn_version
        )

        # Create Custom Loggroup
        # /aws/lambda/function-name
        greeter_fn_lg = _logs.LogGroup(
            self,
            "squareFnLoggroup",
            log_group_name=f"/aws/lambda/{greeter_fn.function_name}",
            retention=_logs.RetentionDays.ONE_WEEK,
            removal_policy=core.RemovalPolicy.DESTROY
        )

        # Add API GW front end for the Lambda
        back_end_01_api_stage_options = _apigw.StageOptions(
            stage_name="miztiik",
            throttling_rate_limit=10,
            throttling_burst_limit=100,
            logging_level=_apigw.MethodLoggingLevel.INFO
        )

        # Lets create a private secure end point

        # Create a security group dedicated to our API Endpoint
        self.secure_private_api_01_sec_grp = _ec2.SecurityGroup(
            self,
            "secureApi01SecurityGroup",
            vpc=vpc,
            allow_all_outbound=True,
            description="Miztiik Automation: Secure our private API using security groups"
        )

        # Allow 443 inbound on our Security Group
        self.secure_private_api_01_sec_grp.add_ingress_rule(
            _ec2.Peer.ipv4(vpc.vpc_cidr_block),
            _ec2.Port.tcp(443)
        )

        # Interface endpoint so the PRIVATE API is reachable from
        # inside the VPC via private DNS.
        secure_private_api_01_endpoint = _ec2.InterfaceVpcEndpoint(
            self,
            "secureApi01Endpoint",
            vpc=vpc,
            service=_ec2.InterfaceVpcEndpointAwsService.APIGATEWAY,
            private_dns_enabled=True,
            subnets=_ec2.SubnetSelection(
                subnet_type=_ec2.SubnetType.ISOLATED
            )
        )

        # Create a API Gateway Resource Policy to attach to API GW
        # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-apigateway-restapi.html#cfn-apigateway-restapi-policy
        # Explicit DENY for any invoke not arriving through the VPC
        # endpoint above, plus a broad ALLOW (deny wins on conflict).
        secure_private_api_01_res_policy = _iam.PolicyDocument(
            statements=[
                _iam.PolicyStatement(
                    principals=[_iam.AnyPrincipal()],
                    actions=["execute-api:Invoke"],
                    # resources=[f"{api_01.arn_for_execute_api(method="GET",path="greeter", stage="miztiik")}"],
                    resources=[core.Fn.join("", ["execute-api:/", "*"])],
                    effect=_iam.Effect.DENY,
                    conditions={
                        "StringNotEquals":
                        {
                            "aws:sourceVpc": f"{secure_private_api_01_endpoint.vpc_endpoint_id}"
                        }
                    },
                    sid="DenyAllNonVPCAccessToApi"
                ),
                _iam.PolicyStatement(
                    principals=[_iam.AnyPrincipal()],
                    actions=["execute-api:Invoke"],
                    resources=[core.Fn.join("", ["execute-api:/", "*"])],
                    effect=_iam.Effect.ALLOW,
                    sid="AllowVPCAccessToApi"
                )
            ]
        )

        # Create API Gateway
        secure_private_api_01 = _apigw.RestApi(
            self,
            "backEnd01Api",
            rest_api_name=f"{back_end_api_name}",
            deploy_options=back_end_01_api_stage_options,
            endpoint_types=[
                _apigw.EndpointType.PRIVATE
            ],
            policy=secure_private_api_01_res_policy,
        )

        # Route: GET /secure/greeter -> greeter Lambda (proxy).
        back_end_01_api_res = secure_private_api_01.root.add_resource("secure")
        greeter = back_end_01_api_res.add_resource("greeter")

        greeter_method_get = greeter.add_method(
            http_method="GET",
            request_parameters={
                "method.request.header.InvocationType": True,
                "method.request.path.number": True
            },
            integration=_apigw.LambdaIntegration(
                handler=greeter_fn,
                proxy=True
            )
        )

        # Outputs
        # NOTE(review): `greeter.url` — confirm the apigw Resource type
        # exposes a `url` attribute in the CDK version pinned here.
        output_1 = core.CfnOutput(
            self,
            "SecureApiUrl",
            value=f"{greeter.url}",
            description="Use an utility like curl from the same VPC as the API to invoke it."
        )
    def __init__(self, scope: core.Construct, id: str, datalake: DataLakeLayer,
                 **kwargs) -> None:
        """Elasticsearch 7.9 cluster for the data lake, placed in the
        lake's VPC with app/slow-index logging enabled.

        :param datalake: supplies the VPC for the domain's placement.
        """
        super().__init__(scope, id, **kwargs)

        # self.encryption_key = kms.Key(
        #   self, 'EncryptionKey',
        #   removal_policy=core.RemovalPolicy.DESTROY,
        #   enable_key_rotation=True)

        # Open policy on all resources; the domain is only reachable
        # inside the VPC, which provides the real network boundary.
        policy = iam.PolicyStatement(sid='Allow-by-IPAddress',
                                     actions=['es:*'],
                                     principals=[iam.AnyPrincipal()],
                                     resources=['*'])

        # Not supported with ES in Vpc mode.
        # policy.add_condition('IpAddress',{
        #     'aws:SourceIp':'74.102.88.0/24'
        # })
        # NOTE(review): this SG admits all traffic from any IPv4 source
        # — confirm that is intended even inside the VPC.
        self.security_group = ec2.SecurityGroup(
            self,
            'SecurityGroup',
            vpc=datalake.vpc,
            allow_all_outbound=True,
            description='Elastic Search Security Group')
        self.security_group.add_ingress_rule(peer=ec2.Peer.any_ipv4(),
                                             connection=ec2.Port.all_traffic(),
                                             description='Allow all')

        self.search = es.Domain(
            self,
            'SearchCluster',
            version=es.ElasticsearchVersion.V7_9,
            enforce_https=True,
            node_to_node_encryption=True,
            capacity=es.CapacityConfig(
                master_nodes=3,
                #warm_nodes=len(self.vpc.availability_zones),
                data_nodes=2  # len(self.vpc.availability_zones),
            ),
            zone_awareness=es.ZoneAwarenessConfig(
                availability_zone_count=2  #len(self.vpc.availability_zones)
            ),
            # encryption_at_rest=es.EncryptionAtRestOptions(
            #     enabled=False,
            #     kms_key=self.encryption_key
            # ),
            vpc_options=es.VpcOptions(subnets=datalake.vpc.private_subnets,
                                      security_groups=[self.security_group]),
            logging=es.LoggingOptions(
                app_log_enabled=True,
                app_log_group=logs.LogGroup(
                    self,
                    'SearchAppLogGroup',
                    removal_policy=core.RemovalPolicy.DESTROY,
                    retention=logs.RetentionDays.ONE_MONTH),
                audit_log_enabled=False,
                audit_log_group=logs.LogGroup(
                    self,
                    'SearchAuditLogs',
                    removal_policy=core.RemovalPolicy.DESTROY,
                    retention=logs.RetentionDays.ONE_MONTH),
                slow_index_log_enabled=True,
                slow_index_log_group=logs.LogGroup(
                    self,
                    'SearchSlowIndex',
                    removal_policy=core.RemovalPolicy.DESTROY,
                    retention=logs.RetentionDays.ONE_MONTH),
            ),
            access_policies=[policy])

        # Configre the LinkedServiceRole to update the VPC
        # Raw CfnResource because the ServiceLinkedRole has no L2
        # construct; the domain must wait for it to exist.
        serviceLinkedRole = core.CfnResource(
            self,
            'LinkedServiceRole',
            type="AWS::IAM::ServiceLinkedRole",
            properties={
                'AWSServiceName': "es.amazonaws.com",
                'Description': "Role for ES to access resources in my VPC"
            })
        self.search.node.add_dependency(serviceLinkedRole)
Exemplo n.º 21
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Static-website stack: private S3 bucket fronted by CloudFront.

        The bucket blocks all public access and is reachable only through
        a CloudFront Origin Access Identity; an ACM certificate covers the
        apex and www aliases, and HTTP viewers are redirected to HTTPS.
        """
        super().__init__(scope, id, **kwargs)

        # Define S3 bucket that will host site assets
        website_bucket = s3.Bucket(
            self,
            'parthrparikh-com-assets-bucket',
            website_index_document='index.html',
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
        )
        # Deny non-SSL traffic.
        # Fix: the deny must name BOTH the bucket ARN (bucket-level ops
        # such as s3:ListBucket) and the object ARN pattern "<arn>/*"
        # (object-level ops such as s3:GetObject). The original statement
        # listed only the bucket ARN, so object requests over plain HTTP
        # were not covered by the SSL-only rule.
        website_bucket.add_to_resource_policy(
            iam.PolicyStatement(
                effect=iam.Effect.DENY,
                actions=["s3:*"],
                resources=[
                    website_bucket.bucket_arn,
                    f"{website_bucket.bucket_arn}/*",
                ],
                conditions={'Bool': {
                    'aws:SecureTransport': False,
                }},
                principals=[iam.AnyPrincipal()],
            ))
        # Upload the site content from the local ../website/ directory.
        s3_deploy.BucketDeployment(
            self,
            'parthrparikh-com-deploy-website',
            sources=[s3_deploy.Source.asset('../website/')],
            destination_bucket=website_bucket,
        )

        # Define certificate for parthrparikh.com
        cert = acm.Certificate(self,
                               'parthrparikh-com-cert',
                               domain_name='parthrparikh.com',
                               subject_alternative_names=[
                                   'www.parthrparikh.com',
                               ])

        # Define CloudFront distribution. The OAI is granted read on the
        # bucket so CloudFront can fetch objects despite BLOCK_ALL.
        origin_access_identity = cf.OriginAccessIdentity(
            self,
            'OriginAccessIdentity',
            comment='Personal website (parthrparikh.com) OAI to reach bucket',
        )
        website_bucket.grant_read(origin_access_identity)
        distro = cf.CloudFrontWebDistribution(
            self,
            'parthrparikh-com-distribution',
            origin_configs=[
                cf.SourceConfiguration(
                    s3_origin_source=cf.S3OriginConfig(
                        s3_bucket_source=website_bucket,
                        origin_access_identity=origin_access_identity),
                    behaviors=[
                        cf.Behavior(
                            is_default_behavior=True,
                            # Short TTLs keep content updates visible quickly.
                            default_ttl=core.Duration.minutes(10),
                            max_ttl=core.Duration.hours(1),
                        )
                    ],
                ),
            ],
            viewer_certificate=cf.ViewerCertificate.from_acm_certificate(
                certificate=cert,
                aliases=[
                    'parthrparikh.com',
                    'www.parthrparikh.com',
                ]),
            viewer_protocol_policy=cf.ViewerProtocolPolicy.REDIRECT_TO_HTTPS,
        )
Exemplo n.º 22
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Demo of enforcing server-side encryption on S3 uploads.

        Provisions one SSE-S3 bucket and one SSE-KMS bucket, attaches
        bucket policies that reject ``PutObject`` requests whose
        encryption headers do not match the bucket's scheme, and wires up
        a test Lambda (with two distinct KMS keys in its environment) to
        exercise both buckets.
        """
        super().__init__(scope, id, **kwargs)

        def deny_put_object_unless(bucket, conditions):
            # Attach a DENY on s3:PutObject, for every principal and every
            # object in the bucket, unless the request satisfies the given
            # condition block.
            bucket.add_to_resource_policy(
                iam.PolicyStatement(
                    effect=iam.Effect.DENY,
                    actions=["s3:PutObject"],
                    conditions=conditions,
                    principals=[iam.AnyPrincipal()],
                    resources=[bucket.arn_for_objects("*")],
                )
            )

        # Two keys: one backing the SSE-KMS bucket, one deliberately
        # unrelated so the test function can attempt mismatched uploads.
        s3_key = kms.Key(
            self,
            "kms-for-s3",
            description="Encryption key for the KMS encrypted S3 bucket",
            removal_policy=core.RemovalPolicy.DESTROY  # We don't want this to stick around after the demo
        )
        unrelated_key = kms.Key(
            self,
            "different-kms-key",
            description="Another KMS Key",
            removal_policy=core.RemovalPolicy.DESTROY  # We don't want this to stick around after the demo
        )

        # Bucket 1: SSE-S3 (AES256). Uploads must either omit the
        # encryption header or set it to exactly "AES256".
        sse_s3_bucket = s3.Bucket(
            self,
            "bucket-with-sse-s3",
            encryption=s3.BucketEncryption.S3_MANAGED
        )
        deny_put_object_unless(sse_s3_bucket, {
            "Null": {
                "s3:x-amz-server-side-encryption": "false"
            },
            "StringNotEqualsIfExists": {
                "s3:x-amz-server-side-encryption": "AES256"
            }
        })

        # Bucket 2: SSE-KMS with a specific key. Two statements: one pins
        # the algorithm to "aws:kms", one pins the key id to s3_key.
        sse_kms_bucket = s3.Bucket(
            self,
            "bucket-with-sse-kms",
            encryption=s3.BucketEncryption.KMS,
            encryption_key=s3_key
        )
        deny_put_object_unless(sse_kms_bucket, {
            "Null": {
                "s3:x-amz-server-side-encryption": "false"
            },
            "StringNotEqualsIfExists": {
                "s3:x-amz-server-side-encryption": "aws:kms"
            }
        })
        deny_put_object_unless(sse_kms_bucket, {
            "StringNotEqualsIfExists": {
                "s3:x-amz-server-side-encryption-aws-kms-key-id": s3_key.key_arn
            }
        })

        # Lambda that exercises both buckets; bucket names and key ids are
        # handed over via environment variables.
        probe_fn = _lambda.Function(
            self,
            "encryption-test-function",
            code=_lambda.Code.from_asset(
                path=os.path.join(os.path.dirname(__file__), "..", "encryption_test")
            ),
            handler="handler.lambda_handler",
            runtime=_lambda.Runtime.PYTHON_3_8,
            environment={
                "SSE_S3_BUCKET": sse_s3_bucket.bucket_name,
                "SSE_KMS_BUCKET": sse_kms_bucket.bucket_name,
                "KMS_FOR_S3": s3_key.key_id,
                "DIFFERENT_KMS": unrelated_key.key_id,
            },
            timeout=core.Duration.seconds(15)
        )
        # Grants are issued in the original order so the generated IAM
        # statements come out identical.
        s3_key.grant_encrypt_decrypt(probe_fn)
        unrelated_key.grant_encrypt_decrypt(probe_fn)
        sse_kms_bucket.grant_read_write(probe_fn)
        sse_s3_bucket.grant_read_write(probe_fn)
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """CIDR "vending machine": API Gateway + DynamoDB + Lambda.

        POST /vpc allocates a CIDR block via a Lambda; PATCH and DELETE
        talk to DynamoDB directly through AWS service integrations, with
        condition expressions restricting updates/deletes to the calling
        account. The API's resource policy allows IAM-authenticated
        callers from this AWS Organization only.
        """
        super().__init__(scope, id, **kwargs)

        # Organization id comes from cdk.json / --context.
        organization_id = self.node.try_get_context("organizationId")

        # create dynamo table
        allocation_table = aws_dynamodb.Table(
            self,
            "CidrBlockTable",
            partition_key=aws_dynamodb.Attribute(
                name="vpcCidrBlock", type=aws_dynamodb.AttributeType.STRING))

        # create producer lambda function
        # NOTE(review): Code.asset is the deprecated alias of
        # Code.from_asset, and the Python 3.6 runtime is EOL — consider
        # upgrading both when next touching this stack.
        create_lambda = aws_lambda.Function(
            self,
            "create_lambda_function",
            runtime=aws_lambda.Runtime.PYTHON_3_6,
            handler="create.lambda_handler",
            code=aws_lambda.Code.asset("./src/"))

        # Allocation parameters consumed by the handler.
        create_lambda.add_environment("TABLE_NAME",
                                      allocation_table.table_name)
        create_lambda.add_environment("MASTER_CIDR_BLOCK", "10.0.0.0/12")
        create_lambda.add_environment("VPC_NETMASK", "24")
        create_lambda.add_environment("SUBNET_NETMASK", "26")

        # grant permission to lambda to write to demo table
        allocation_table.grant_write_data(create_lambda)
        allocation_table.grant_read_data(create_lambda)

        # API gateway ... Allow own Organizations
        # Resource policy pattern: a broad ALLOW plus an explicit DENY for
        # callers outside the organization (DENY wins over ALLOW).
        api_policy = aws_iam.PolicyDocument()

        # "execute-api:/*" is the documented shorthand for "this API" in a
        # resource policy, since the policy cannot reference the API's own
        # ARN while the API is being created.
        api_policy.add_statements(
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                principals=[aws_iam.AnyPrincipal()],
                actions=["execute-api:Invoke"],
                resources=[core.Fn.join('', ['execute-api:/', '*'])]))

        api_policy.add_statements(
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.DENY,
                actions=["execute-api:Invoke"],
                conditions={
                    "StringNotEquals": {
                        "aws:PrincipalOrgID": [organization_id]
                    }
                },
                principals=[aws_iam.AnyPrincipal()],
                resources=[core.Fn.join('', ['execute-api:/', '*'])]))

        base_api = aws_apigateway.RestApi(self,
                                          'ApiGateway',
                                          rest_api_name='cidr_vending_machine',
                                          policy=api_policy)

        vpc_api = base_api.root.add_resource('vpc')

        # Role assumed by API Gateway for the direct DynamoDB integrations.
        rest_api_role = aws_iam.Role(
            self,
            'RestAPIRole',
            assumed_by=aws_iam.ServicePrincipal('apigateway.amazonaws.com'),
        )
        allocation_table.grant_read_write_data(rest_api_role)

        # VTL mapping template for PATCH -> DynamoDB UpdateItem. Doubled
        # braces are literal braces (the string goes through str.format to
        # inject the table name); "$input"/"$context" are resolved by API
        # Gateway at request time. The condition expression ensures only
        # the owning account can update its allocation.
        patch_request_string = """
            {{
            "TableName": "{}",
                "Key": {{
                        "vpcCidrBlock": {{
                            "S": "$input.params('cidr_block')"
                        }}
                    }},
                    "UpdateExpression": "set vpcId = :v",
                    "ConditionExpression": "accountId = :v2",
                    "ExpressionAttributeValues" : {{
                        ":v": {{"S": "$input.params('vpc_id')"}},
                        ":v2": {{"S": "$context.identity.accountId"}}
                    }},
                    "ReturnValues": "ALL_NEW"

            }}"""

        # VTL mapping template for DELETE -> DynamoDB DeleteItem, with the
        # same owner-only condition.
        delete_request_string = """
            {{
            "TableName": "{}",
                "Key": {{
                        "vpcCidrBlock": {{
                            "S": "$input.params('cidr_block')"
                        }}
                    }},
                "ConditionExpression": "accountId = :v2",
                "ExpressionAttributeValues" : {{
                    ":v2": {{"S": "$context.identity.accountId"}}
                }}
            }}"""

        network_integration = aws_apigateway.LambdaIntegration(create_lambda)
        update_integration = aws_apigateway.AwsIntegration(
            service='dynamodb',
            action='UpdateItem',
            integration_http_method='POST',
            options=aws_apigateway.IntegrationOptions(
                request_templates={
                    "application/json":
                    patch_request_string.format(allocation_table.table_name)
                },
                integration_responses=[
                    aws_apigateway.IntegrationResponse(status_code="200")
                ],
                credentials_role=rest_api_role))

        delete_integration = aws_apigateway.AwsIntegration(
            service='dynamodb',
            action='DeleteItem',
            integration_http_method='POST',
            options=aws_apigateway.IntegrationOptions(
                request_templates={
                    "application/json":
                    delete_request_string.format(allocation_table.table_name)
                },
                integration_responses=[
                    aws_apigateway.IntegrationResponse(status_code="200")
                ],
                credentials_role=rest_api_role))

        # All methods require SigV4-signed (IAM) requests.
        vpc_api.add_method(
            'POST',
            network_integration,
            authorization_type=aws_apigateway.AuthorizationType.IAM)
        vpc_api.add_method(
            'DELETE',
            delete_integration,
            authorization_type=aws_apigateway.AuthorizationType.IAM,
            method_responses=[
                aws_apigateway.MethodResponse(status_code="200")
            ])
        vpc_api.add_method(
            'PATCH',
            update_integration,
            authorization_type=aws_apigateway.AuthorizationType.IAM,
            method_responses=[
                aws_apigateway.MethodResponse(status_code="200")
            ])
Exemplo n.º 24
0
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        webhook_function,
        on_demand_function,
        schedule_update_function,
        status_query_function,
        slack_function,
        ingest_allowed_ips,
    ):
        """REST API fronting the Zoom-ingester Lambdas.

        Builds a LambdaRestApi with five resources (new_recording, ingest,
        schedule_update, status, slack — created via ``self.create_resource``,
        defined elsewhere on this class), an IP-allowlist resource policy
        for the ingest/status endpoints, access logging, and CfnOutputs
        exporting every endpoint URL and resource id.

        :param webhook_function: default handler for the API and the
            new_recording resource.
        :param ingest_allowed_ips: list of CIDR/IPs permitted to call
            POST /ingest and GET /status.
        """
        super().__init__(scope, id)

        stack_name = core.Stack.of(self).stack_name

        # Broad ALLOW, then a DENY for the ingest/status paths when the
        # caller's source IP is outside the allowlist (DENY overrides).
        policy = iam.PolicyDocument(
            statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=["execute-api:Invoke"],
                    principals=[iam.AnyPrincipal()],
                    # note that the policy is a prop of the api which cannot
                    # reference itself, see the Cloudformation documentation
                    # for api gateway policy attribute
                    resources=[core.Fn.join("", ["execute-api:/", "*"])],
                ),
                iam.PolicyStatement(
                    effect=iam.Effect.DENY,
                    actions=["execute-api:Invoke"],
                    principals=[iam.AnyPrincipal()],
                    resources=[
                        core.Fn.join("", ["execute-api:/", "*/POST/ingest"]),
                        core.Fn.join("", ["execute-api:/", "*/GET/status"]),
                    ],
                    conditions={
                        "NotIpAddress": {"aws:SourceIp": ingest_allowed_ips}
                    },
                ),
            ]
        )

        self.rest_api_name = f"{stack_name}-{names.REST_API}"

        # Access-log destination for the deployed stage.
        log_group = logs.LogGroup(
            self,
            "apilogs",
            log_group_name=f"/aws/apigateway/{self.rest_api_name}/access_logs",
            removal_policy=core.RemovalPolicy.DESTROY,
            retention=logs.RetentionDays.SIX_MONTHS,
        )

        self.api = apigw.LambdaRestApi(
            self,
            "api",
            handler=webhook_function,  # default handler
            rest_api_name=self.rest_api_name,
            proxy=False,
            deploy=True,
            policy=policy,
            deploy_options=apigw.StageOptions(
                access_log_destination=apigw.LogGroupLogDestination(log_group),
                access_log_format=apigw.AccessLogFormat.clf(),
                data_trace_enabled=True,
                metrics_enabled=True,
                logging_level=apigw.MethodLoggingLevel.INFO,
                stage_name=names.API_STAGE,
            ),
        )

        self.api.add_api_key("ZoomIngesterApiKey")

        # One resource per endpoint; create_resource wires the method and
        # integration for the given HTTP verb.
        self.new_recording_resource = self.create_resource(
            "new_recording",
            webhook_function,
            "POST",
        )

        # The ingest endpoint is browser-facing, hence the CORS preflight.
        self.ingest_resource = self.create_resource(
            "ingest",
            on_demand_function,
            "POST",
            cors_options=apigw.CorsOptions(
                allow_origins=apigw.Cors.ALL_ORIGINS,
                allow_methods=["POST", "OPTIONS"],
                allow_headers=apigw.Cors.DEFAULT_HEADERS
                + ["Accept-Language", "X-Requested-With"],
            ),
        )

        self.schedule_update_resource = self.create_resource(
            "schedule_update",
            schedule_update_function,
            "POST",
        )

        self.status_query_resource = self.create_resource(
            "status",
            status_query_function,
            "GET",
        )

        self.slack_resource = self.create_resource(
            "slack",
            slack_function,
            "POST",
        )

        def endpoint_url(resource_name):
            # Public invoke URL of a resource on the deployed stage.
            return (
                f"https://{self.api.rest_api_id}.execute-api."
                f"{core.Stack.of(self).region}.amazonaws.com/"
                f"{names.API_STAGE}/{resource_name}"
            )

        # The on-demand ingester calls back into the webhook endpoint.
        on_demand_function.add_environment(
            "WEBHOOK_ENDPOINT_URL",
            endpoint_url("new_recording"),
        )

        # Stack outputs: endpoint URLs and resource ids, exported for
        # cross-stack / tooling consumption under stable names.
        core.CfnOutput(
            self,
            "WebhookEndpoint",
            export_name=f"{stack_name}-{names.WEBHOOK_ENDPOINT}-url",
            value=endpoint_url("new_recording"),
        )

        core.CfnOutput(
            self,
            "OnDemandEndpoint",
            export_name=f"{stack_name}-{names.ON_DEMAND_ENDPOINT}-url",
            value=endpoint_url("ingest"),
        )

        core.CfnOutput(
            self,
            "ScheduleUpdateEndpoint",
            export_name=f"{stack_name}-{names.SCHEDULE_UPDATE_ENDPOINT}-url",
            value=endpoint_url("schedule_update"),
        )

        core.CfnOutput(
            self,
            "StatusQueryEndpoint",
            export_name=f"{stack_name}-{names.STATUS_ENDPOINT}-url",
            value=endpoint_url("status"),
        )

        core.CfnOutput(
            self,
            "SlackEndpoint",
            export_name=f"{stack_name}-{names.SLACK_ENDPOINT}-url",
            value=endpoint_url("slack"),
        )

        core.CfnOutput(
            self,
            "WebhookResourceId",
            export_name=f"{stack_name}-{names.WEBHOOK_ENDPOINT}-resource-id",
            value=self.new_recording_resource.resource_id,
        )

        core.CfnOutput(
            self,
            "OnDemandResourceId",
            export_name=f"{stack_name}-{names.ON_DEMAND_ENDPOINT}-resource-id",
            value=self.ingest_resource.resource_id,
        )

        core.CfnOutput(
            self,
            "ScheduleUpdateResourceId",
            export_name=f"{stack_name}-{names.SCHEDULE_UPDATE_ENDPOINT}-resource-id",
            value=self.schedule_update_resource.resource_id,
        )

        core.CfnOutput(
            self,
            "StatusQueryResourceId",
            export_name=f"{stack_name}-{names.STATUS_ENDPOINT}-resource-id",
            value=self.status_query_resource.resource_id,
        )

        core.CfnOutput(
            self,
            "SlackResourceId",
            export_name=f"{stack_name}-{names.SLACK_ENDPOINT}-resource-id",
            value=self.slack_resource.resource_id,
        )

        core.CfnOutput(
            self,
            "RestApiId",
            export_name=f"{stack_name}-{names.REST_API}-id",
            value=self.api.rest_api_id,
        )
Exemplo n.º 25
0
    def __init__(self, scope: core.Construct, id: str, webhook_function,
                 on_demand_function, ingest_allowed_ips):
        """REST API for the Zoom ingester (two-endpoint variant).

        Exposes POST /new_recording (handled by the API's default handler,
        ``webhook_function``) and POST /ingest (``on_demand_function``,
        CORS-enabled), with a resource policy that restricts /ingest to
        ``ingest_allowed_ips``. Exports endpoint URLs and ids as outputs.
        """
        super().__init__(scope, id)

        stack_name = core.Stack.of(self).stack_name

        # Broad ALLOW plus a source-IP DENY on the ingest path (an explicit
        # DENY always overrides the ALLOW).
        policy = iam.PolicyDocument(statements=[
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=["execute-api:Invoke"],
                principals=[iam.AnyPrincipal()],
                # note that the policy is a prop of the api which cannot reference itself
                # see the Cloudformation documentation for api gateway policy attribute
                resources=[core.Fn.join('', ['execute-api:/', '*'])]),
            iam.PolicyStatement(effect=iam.Effect.DENY,
                                actions=["execute-api:Invoke"],
                                principals=[iam.AnyPrincipal()],
                                resources=[
                                    core.Fn.join(
                                        '', ['execute-api:/', '*/POST/ingest'])
                                ],
                                conditions={
                                    "NotIpAddress": {
                                        "aws:SourceIp": ingest_allowed_ips
                                    }
                                })
        ])

        self.rest_api_name = f"{stack_name}-{names.REST_API}"

        # Access-log destination for the deployed stage.
        log_group = logs.LogGroup(
            self,
            "apilogs",
            log_group_name=f"/aws/apigateway/{self.rest_api_name}/access_logs",
            removal_policy=core.RemovalPolicy.DESTROY,
            retention=logs.RetentionDays.SIX_MONTHS)

        self.api = apigw.LambdaRestApi(
            self,
            "api",
            handler=webhook_function,  # default handler
            rest_api_name=self.rest_api_name,
            proxy=False,
            deploy=True,
            policy=policy,
            deploy_options=apigw.StageOptions(
                access_log_destination=apigw.LogGroupLogDestination(log_group),
                access_log_format=apigw.AccessLogFormat.clf(),
                data_trace_enabled=True,
                metrics_enabled=True,
                logging_level=apigw.MethodLoggingLevel.INFO,
                stage_name=names.API_STAGE))

        self.api.add_api_key("ZoomIngesterApiKey")

        # No integration given here, so the method falls back to the API's
        # default handler (webhook_function).
        self.new_recording_resource = self.api.root.add_resource(
            "new_recording")
        self.new_recording_method = self.new_recording_resource.add_method(
            "POST",
            method_responses=[
                apigw.MethodResponse(status_code="200",
                                     response_models={
                                         "application/json":
                                         apigw.Model.EMPTY_MODEL
                                     })
            ])

        # Browser-facing endpoint: CORS preflight enabled.
        self.ingest_resource = self.api.root.add_resource("ingest",
            default_cors_preflight_options=apigw.CorsOptions(
                allow_origins=apigw.Cors.ALL_ORIGINS,
                allow_methods=["POST", "OPTIONS"],
                allow_headers=apigw.Cors.DEFAULT_HEADERS \
                              + ["Accept-Language","X-Requested-With"]
            )
        )
        on_demand_integration = apigw.LambdaIntegration(on_demand_function)
        self.ingest_method = self.ingest_resource.add_method(
            "POST",
            on_demand_integration,
            method_responses=[
                apigw.MethodResponse(status_code="200",
                                     response_models={
                                         "application/json":
                                         apigw.Model.EMPTY_MODEL
                                     })
            ])

        def endpoint_url(resource_name):
            # Public invoke URL of a resource on the deployed stage.
            return (f"https://{self.api.rest_api_id}.execute-api."
                    f"{core.Stack.of(self).region}.amazonaws.com/"
                    f"{names.API_STAGE}/{resource_name}")

        # The on-demand ingester calls back into the webhook endpoint.
        on_demand_function.add_environment("WEBHOOK_ENDPOINT_URL",
                                           endpoint_url("new_recording"))

        # Stack outputs: endpoint URLs and resource ids, exported under
        # stable names for other stacks / tooling.
        core.CfnOutput(
            self,
            "WebhookEndpoint",
            export_name=f"{stack_name}-{names.WEBHOOK_ENDPOINT}-url",
            value=endpoint_url("new_recording"))

        core.CfnOutput(
            self,
            "OnDemandEndpoint",
            export_name=f"{stack_name}-{names.ON_DEMAND_ENDPOINT}-url",
            value=endpoint_url("ingest"))

        core.CfnOutput(
            self,
            "WebhookResourceId",
            export_name=f"{stack_name}-{names.WEBHOOK_ENDPOINT}-resource-id",
            value=self.new_recording_resource.resource_id)

        core.CfnOutput(
            self,
            "OnDemandResourceId",
            export_name=f"{stack_name}-{names.ON_DEMAND_ENDPOINT}-resource-id",
            value=self.ingest_resource.resource_id)

        core.CfnOutput(self,
                       "RestApiId",
                       export_name=f"{stack_name}-{names.REST_API}-id",
                       value=self.api.rest_api_id)
Exemplo n.º 26
0
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 resources: FsiSharedResources,
                 subnet_group_name: str = 'Default',
                 **kwargs) -> None:
        """Earnings-calendar service: container Lambda behind API Gateway.

        Builds a Docker image asset, a VPC-attached DockerImageFunction
        with a DynamoDB cache table, a LambdaRestApi restricted by source
        IP, and a Route53 alias record for the custom domain.

        :param resources: shared landing-zone resources (VPC, security
            group, DNS zone).
        :param subnet_group_name: subnet group the function runs in.
        """
        super().__init__(scope, id, **kwargs)

        # Configure the container resources...
        self.repo = assets.DockerImageAsset(self,
                                            'Repo',
                                            directory='src/fsi/earnings',
                                            file='Dockerfile')

        # The asset URI ends in ":<tag>"; split it off to reference the
        # same image from the ECR repository.
        code = lambda_.DockerImageCode.from_ecr(
            repository=self.repo.repository,
            tag=self.repo.image_uri.split(':')[-1])

        # Configure security policies...
        # NOTE(review): service principal is given as 'lambda' rather than
        # 'lambda.amazonaws.com' — confirm the CDK version in use
        # normalizes the short form.
        role = iam.Role(
            self,
            'Role',
            assumed_by=iam.ServicePrincipal(service='lambda'),
            description='HomeNet-{}-Fsi-EarningsReport'.format(
                resources.landing_zone.zone_name),
            role_name='fsi-earnings@homenet.{}.{}'.format(
                resources.landing_zone.zone_name,
                core.Stack.of(self).region).lower(),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    managed_policy_name=
                    'service-role/AWSLambdaVPCAccessExecutionRole'),
            ])

        # Grant any permissions...
        # Cache table: TTL-expired entries via the 'Expiration' attribute.
        self.earnings_table = d.Table(
            self,
            'EarningCalendar',
            table_name='FsiCoreSvc-EarningsCalendar',
            billing_mode=d.BillingMode.PAY_PER_REQUEST,
            partition_key=d.Attribute(name='PartitionKey',
                                      type=d.AttributeType.STRING),
            sort_key=d.Attribute(name='SortKey', type=d.AttributeType.STRING),
            time_to_live_attribute='Expiration',
            point_in_time_recovery=True,
            server_side_encryption=True)
        self.earnings_table.grant_read_write_data(role)

        # Define any variables for the function
        self.function_env = {
            'CACHE_TABLE': self.earnings_table.table_name,
        }

        # Create the backing webapi compute ...
        self.function = lambda_.DockerImageFunction(
            self,
            'Function',
            code=code,
            role=role,
            function_name='HomeNet-{}-Fsi-{}'.format(
                resources.landing_zone.zone_name, FsiEarningsGateway.__name__),
            description='Python Lambda function for ' +
            FsiEarningsGateway.__name__,
            timeout=core.Duration.seconds(30),
            tracing=lambda_.Tracing.ACTIVE,
            vpc=resources.landing_zone.vpc,
            log_retention=logs.RetentionDays.FIVE_DAYS,
            memory_size=128,
            allow_all_outbound=True,
            vpc_subnets=ec2.SubnetSelection(
                subnet_group_name=subnet_group_name),
            security_groups=[resources.landing_zone.security_group],
            environment=self.function_env,
        )

        # Bind APIG to Lambda compute...
        # NOTE(review): the certificate ARN is hard-coded to one
        # account/region, and SecurityPolicy.TLS_1_0 permits TLS 1.0
        # clients — consider parameterizing the ARN and raising the
        # minimum to TLS 1.2.
        self.frontend_proxy = a.LambdaRestApi(
            self,
            'ApiGateway',
            proxy=True,
            handler=self.function,
            options=a.RestApiProps(
                description='Hosts the Earnings Calendar Services via ' +
                self.function.function_name,
                domain_name=a.DomainNameOptions(
                    domain_name='earnings.trader.fsi',
                    certificate=Certificate.from_certificate_arn(
                        self,
                        'Certificate',
                        certificate_arn=
                        'arn:aws:acm:us-east-2:581361757134:certificate/4e3235f7-49a1-42a5-a671-f2449b45f72d'
                    ),
                    security_policy=a.SecurityPolicy.TLS_1_0),
                # Only private/known source IPs may invoke the API.
                policy=iam.PolicyDocument(statements=[
                    iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                        actions=['execute-api:Invoke'],
                                        principals=[iam.AnyPrincipal()],
                                        resources=['*'],
                                        conditions={
                                            'IpAddress': {
                                                'aws:SourceIp': [
                                                    '10.0.0.0/8',
                                                    '192.168.0.0/16',
                                                    '72.90.160.65/32'
                                                ]
                                            }
                                        })
                ]),
                endpoint_configuration=a.EndpointConfiguration(
                    types=[a.EndpointType.REGIONAL], )))

        # Register Dns Name
        r53.ARecord(self,
                    'AliasRecord',
                    zone=resources.trader_dns_zone,
                    record_name='earnings.%s' %
                    resources.trader_dns_zone.zone_name,
                    target=r53.RecordTarget.from_alias(
                        dns_targets.ApiGateway(self.frontend_proxy)))
Exemplo n.º 27
0
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 vpc: aws_ec2.Vpc,
                 ecs_cluster=aws_ecs.Cluster,
                 alb=elbv2.ApplicationLoadBalancer,
                 albTestListener=elbv2.ApplicationListener,
                 albProdListener=elbv2.ApplicationListener,
                 blueGroup=elbv2.ApplicationTargetGroup,
                 greenGroup=elbv2.ApplicationTargetGroup,
                 **kwargs) -> None:
        """Provision a blue/green ECS Fargate deployment pipeline.

        Creates the ECR/CodeCommit repositories, IAM roles, CodeDeploy
        application + deployment group (via a Lambda-backed custom
        resource), CloudWatch 4xx alarms for both target groups, the
        dummy and real Fargate task definitions/service, a CodeBuild
        project and the CodePipeline that ties them together.

        NOTE(review): the ``ecs_cluster=aws_ecs.Cluster`` style parameters
        use ``=`` (a class object as default value) where ``:`` (a type
        annotation) was almost certainly intended. Left as-is so callers'
        signatures don't break — confirm all callers pass these explicitly.
        """
        super().__init__(scope, id, **kwargs)

        # Deployment constants. (A dead `ECS_APP_NAME = "Nginx-app",`
        # tuple assignment that was immediately overwritten has been
        # removed.)
        ECS_DEPLOYMENT_GROUP_NAME = "NginxAppECSBlueGreen"
        ECS_DEPLOYMENT_CONFIG_NAME = "CodeDeployDefault.ECSLinear10PercentEvery1Minutes"
        ECS_TASKSET_TERMINATION_WAIT_TIME = 10
        ECS_TASK_FAMILY_NAME = "Nginx-microservice"
        ECS_APP_NAME = "Nginx-microservice"
        ECS_APP_LOG_GROUP_NAME = "/ecs/Nginx-microservice"
        DUMMY_TASK_FAMILY_NAME = "sample-Nginx-microservice"
        DUMMY_APP_NAME = "sample-Nginx-microservice"
        DUMMY_APP_LOG_GROUP_NAME = "/ecs/sample-Nginx-microservice"
        DUMMY_CONTAINER_IMAGE = "smuralee/nginx"

        # =============================================================================
        # ECR and CodeCommit repositories for the Blue/ Green deployment
        # =============================================================================

        # ECR repository for the docker images
        NginxecrRepo = aws_ecr.Repository(self,
                                          "NginxRepo",
                                          image_scan_on_push=True)

        NginxCodeCommitrepo = aws_codecommit.Repository(
            self,
            "NginxRepository",
            repository_name=ECS_APP_NAME,
            description="Oussama application hosted on NGINX")

        # =============================================================================
        #   CODE BUILD and ECS TASK ROLES for the Blue/ Green deployment
        # =============================================================================

        # IAM role for the Code Build project — needs ECR push permissions
        codeBuildServiceRole = aws_iam.Role(
            self,
            "codeBuildServiceRole",
            assumed_by=aws_iam.ServicePrincipal('codebuild.amazonaws.com'))

        inlinePolicyForCodeBuild = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "ecr:GetAuthorizationToken", "ecr:BatchCheckLayerAvailability",
                "ecr:InitiateLayerUpload", "ecr:UploadLayerPart",
                "ecr:CompleteLayerUpload", "ecr:PutImage"
            ],
            resources=["*"])

        codeBuildServiceRole.add_to_policy(inlinePolicyForCodeBuild)

        # ECS task role (also used as the execution role below)
        ecsTaskRole = aws_iam.Role(
            self,
            "ecsTaskRoleForWorkshop",
            assumed_by=aws_iam.ServicePrincipal('ecs-tasks.amazonaws.com'))

        ecsTaskRole.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AmazonECSTaskExecutionRolePolicy"))

        # =============================================================================
        # CODE DEPLOY APPLICATION for the Blue/ Green deployment
        # =============================================================================

        # Creating the code deploy application
        codeDeployApplication = codedeploy.EcsApplication(
            self, "NginxAppCodeDeploy")

        # Creating the code deploy service role
        codeDeployServiceRole = aws_iam.Role(
            self,
            "codeDeployServiceRole",
            assumed_by=aws_iam.ServicePrincipal('codedeploy.amazonaws.com'))
        codeDeployServiceRole.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                "AWSCodeDeployRoleForECS"))

        # IAM role for the custom Lambda that manages the deployment group
        customLambdaServiceRole = aws_iam.Role(
            self,
            "codeDeployCustomLambda",
            assumed_by=aws_iam.ServicePrincipal('lambda.amazonaws.com'))

        inlinePolicyForLambda = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "iam:PassRole", "sts:AssumeRole", "codedeploy:List*",
                "codedeploy:Get*", "codedeploy:UpdateDeploymentGroup",
                "codedeploy:CreateDeploymentGroup",
                "codedeploy:DeleteDeploymentGroup"
            ],
            resources=["*"])

        customLambdaServiceRole.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AWSLambdaBasicExecutionRole'))
        customLambdaServiceRole.add_to_policy(inlinePolicyForLambda)

        # Custom resource handler: creates the CodeDeploy ECS deployment
        # group, which CloudFormation cannot express natively for ECS.
        createDeploymentGroupLambda = aws_lambda.Function(
            self,
            'createDeploymentGroupLambda',
            code=aws_lambda.Code.from_asset("custom_resources"),
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            handler='create_deployment_group.handler',
            role=customLambdaServiceRole,
            description="Custom resource to create deployment group",
            memory_size=128,
            timeout=core.Duration.seconds(60))

        # ================================================================================================
        # CloudWatch Alarms for 4XX errors — used by CodeDeploy to roll back
        blue4xxMetric = aws_cloudwatch.Metric(
            namespace='AWS/ApplicationELB',
            metric_name='HTTPCode_Target_4XX_Count',
            dimensions={
                "TargetGroup": blueGroup.target_group_full_name,
                "LoadBalancer": alb.load_balancer_full_name
            },
            statistic="sum",
            period=core.Duration.minutes(1))

        blueGroupAlarm = aws_cloudwatch.Alarm(
            self,
            "blue4xxErrors",
            alarm_name="Blue_4xx_Alarm",
            alarm_description=
            "CloudWatch Alarm for the 4xx errors of Blue target group",
            metric=blue4xxMetric,
            threshold=1,
            evaluation_periods=1)

        green4xxMetric = aws_cloudwatch.Metric(
            namespace='AWS/ApplicationELB',
            metric_name='HTTPCode_Target_4XX_Count',
            dimensions={
                "TargetGroup": greenGroup.target_group_full_name,
                "LoadBalancer": alb.load_balancer_full_name
            },
            statistic="sum",
            period=core.Duration.minutes(1))
        greenGroupAlarm = aws_cloudwatch.Alarm(
            self,
            "green4xxErrors",
            alarm_name="Green_4xx_Alarm",
            alarm_description=
            "CloudWatch Alarm for the 4xx errors of Green target group",
            metric=green4xxMetric,
            threshold=1,
            evaluation_periods=1)

        # ================================================================================
        # DUMMY TASK DEFINITION for the initial service creation
        # This is required for the service being made available to create the CodeDeploy Deployment Group
        # ================================================================================================
        sampleTaskDefinition = aws_ecs.FargateTaskDefinition(
            self,
            "sampleTaskDefn",
            family=DUMMY_TASK_FAMILY_NAME,
            cpu=256,
            memory_limit_mib=1024,
            task_role=ecsTaskRole,
            execution_role=ecsTaskRole)

        sampleContainerDefn = sampleTaskDefinition.add_container(
            "sampleAppContainer",
            image=aws_ecs.ContainerImage.from_registry(DUMMY_CONTAINER_IMAGE),
            logging=aws_ecs.AwsLogDriver(log_group=aws_logs.LogGroup(
                self,
                "sampleAppLogGroup",
                log_group_name=DUMMY_APP_LOG_GROUP_NAME,
                removal_policy=core.RemovalPolicy.DESTROY),
                                         stream_prefix=DUMMY_APP_NAME),
            docker_labels={"name": DUMMY_APP_NAME})

        port_mapping = aws_ecs.PortMapping(container_port=80,
                                           protocol=aws_ecs.Protocol.TCP)

        sampleContainerDefn.add_port_mappings(port_mapping)

        # ================================================================================================
        # ECS task definition using ECR image
        # Will be used by the CODE DEPLOY for Blue/Green deployment
        # ================================================================================================
        NginxTaskDefinition = aws_ecs.FargateTaskDefinition(
            self,
            "appTaskDefn",
            family=ECS_TASK_FAMILY_NAME,
            cpu=256,
            memory_limit_mib=1024,
            task_role=ecsTaskRole,
            execution_role=ecsTaskRole)

        NginxcontainerDefinition = NginxTaskDefinition.add_container(
            "NginxAppContainer",
            image=aws_ecs.ContainerImage.from_ecr_repository(
                NginxecrRepo, "latest"),
            logging=aws_ecs.AwsLogDriver(log_group=aws_logs.LogGroup(
                self,
                "NginxAppLogGroup",
                log_group_name=ECS_APP_LOG_GROUP_NAME,
                removal_policy=core.RemovalPolicy.DESTROY),
                                         stream_prefix=ECS_APP_NAME),
            docker_labels={"name": ECS_APP_NAME})
        NginxcontainerDefinition.add_port_mappings(port_mapping)

        # =============================================================================
        # ECS SERVICE for the Blue/ Green deployment
        # =============================================================================
        NginxAppService = aws_ecs.FargateService(
            self,
            "NginxAppService",
            cluster=ecs_cluster,
            task_definition=NginxTaskDefinition,
            health_check_grace_period=core.Duration.seconds(10),
            desired_count=3,
            deployment_controller={
                "type": aws_ecs.DeploymentControllerType.CODE_DEPLOY
            },
            service_name=ECS_APP_NAME)

        # 80 = prod listener traffic, 8080 = test listener traffic
        NginxAppService.connections.allow_from(alb, aws_ec2.Port.tcp(80))
        NginxAppService.connections.allow_from(alb, aws_ec2.Port.tcp(8080))
        NginxAppService.attach_to_application_target_group(blueGroup)

        # =============================================================================
        # CODE DEPLOY - Deployment Group CUSTOM RESOURCE for the Blue/ Green deployment
        # =============================================================================

        core.CustomResource(
            self,
            'customEcsDeploymentGroup',
            service_token=createDeploymentGroupLambda.function_arn,
            properties={
                "ApplicationName": codeDeployApplication.application_name,
                "DeploymentGroupName": ECS_DEPLOYMENT_GROUP_NAME,
                "DeploymentConfigName": ECS_DEPLOYMENT_CONFIG_NAME,
                "ServiceRoleArn": codeDeployServiceRole.role_arn,
                "BlueTargetGroup": blueGroup.target_group_name,
                "GreenTargetGroup": greenGroup.target_group_name,
                "ProdListenerArn": albProdListener.listener_arn,
                "TestListenerArn": albTestListener.listener_arn,
                "EcsClusterName": ecs_cluster.cluster_name,
                "EcsServiceName": NginxAppService.service_name,
                "TerminationWaitTime": ECS_TASKSET_TERMINATION_WAIT_TIME,
                "BlueGroupAlarm": blueGroupAlarm.alarm_name,
                "GreenGroupAlarm": greenGroupAlarm.alarm_name,
            })

        # Import the group created above so the pipeline's deploy action
        # can reference it.
        ecsDeploymentGroup = codedeploy.EcsDeploymentGroup.from_ecs_deployment_group_attributes(
            self,
            "ecsDeploymentGroup",
            application=codeDeployApplication,
            deployment_group_name=ECS_DEPLOYMENT_GROUP_NAME,
            deployment_config=codedeploy.EcsDeploymentConfig.
            from_ecs_deployment_config_name(self, "ecsDeploymentConfig",
                                            ECS_DEPLOYMENT_CONFIG_NAME))

        # =============================================================================
        # CODE BUILD PROJECT for the Blue/ Green deployment
        # =============================================================================

        # Creating the code build project (privileged=True: docker builds)
        NginxAppcodebuild = aws_codebuild.Project(
            self,
            "NginxAppCodeBuild",
            role=codeBuildServiceRole,
            environment=aws_codebuild.BuildEnvironment(
                build_image=aws_codebuild.LinuxBuildImage.STANDARD_4_0,
                compute_type=aws_codebuild.ComputeType.SMALL,
                privileged=True,
                environment_variables={
                    'REPOSITORY_URI': {
                        'value':
                        NginxecrRepo.repository_uri,
                        'type':
                        aws_codebuild.BuildEnvironmentVariableType.PLAINTEXT
                    },
                    'TASK_EXECUTION_ARN': {
                        'value':
                        ecsTaskRole.role_arn,
                        'type':
                        aws_codebuild.BuildEnvironmentVariableType.PLAINTEXT
                    },
                    'TASK_FAMILY': {
                        'value':
                        ECS_TASK_FAMILY_NAME,
                        'type':
                        aws_codebuild.BuildEnvironmentVariableType.PLAINTEXT
                    }
                }),
            source=aws_codebuild.Source.code_commit(
                repository=NginxCodeCommitrepo))

        # =============================================================================
        # CODE PIPELINE for Blue/Green ECS deployment
        # =============================================================================

        codePipelineServiceRole = aws_iam.Role(
            self,
            "codePipelineServiceRole",
            assumed_by=aws_iam.ServicePrincipal('codepipeline.amazonaws.com'))

        inlinePolicyForCodePipeline = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "iam:PassRole", "sts:AssumeRole", "codecommit:Get*",
                "codecommit:List*", "codecommit:GitPull",
                "codecommit:UploadArchive", "codecommit:CancelUploadArchive",
                "codebuild:BatchGetBuilds", "codebuild:StartBuild",
                "codedeploy:CreateDeployment", "codedeploy:Get*",
                "codedeploy:RegisterApplicationRevision", "s3:Get*",
                "s3:List*", "s3:PutObject"
            ],
            resources=["*"])

        codePipelineServiceRole.add_to_policy(inlinePolicyForCodePipeline)

        sourceArtifact = codepipeline.Artifact('sourceArtifact')
        buildArtifact = codepipeline.Artifact('buildArtifact')

        # S3 bucket for storing the code pipeline artifacts
        NginxAppArtifactsBucket = s3.Bucket(
            self,
            "NginxAppArtifactsBucket",
            encryption=s3.BucketEncryption.S3_MANAGED,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL)

        # Bucket policy: reject unencrypted uploads and non-TLS access
        denyUnEncryptedObjectUploads = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.DENY,
            actions=["s3:PutObject"],
            principals=[aws_iam.AnyPrincipal()],
            resources=[NginxAppArtifactsBucket.bucket_arn + "/*"],
            conditions={
                "StringNotEquals": {
                    "s3:x-amz-server-side-encryption": "aws:kms"
                }
            })

        denyInsecureConnections = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.DENY,
            actions=["s3:*"],
            principals=[aws_iam.AnyPrincipal()],
            resources=[NginxAppArtifactsBucket.bucket_arn + "/*"],
            conditions={"Bool": {
                "aws:SecureTransport": "false"
            }})

        NginxAppArtifactsBucket.add_to_resource_policy(
            denyUnEncryptedObjectUploads)
        NginxAppArtifactsBucket.add_to_resource_policy(denyInsecureConnections)

        # Code Pipeline - CloudWatch trigger event is created by CDK
        codepipeline.Pipeline(
            self,
            "ecsBlueGreen",
            role=codePipelineServiceRole,
            artifact_bucket=NginxAppArtifactsBucket,
            stages=[
                codepipeline.StageProps(
                    stage_name='Source',
                    actions=[
                        aws_codepipeline_actions.CodeCommitSourceAction(
                            action_name='Source',
                            repository=NginxCodeCommitrepo,
                            output=sourceArtifact,
                        )
                    ]),
                codepipeline.StageProps(
                    stage_name='Build',
                    actions=[
                        aws_codepipeline_actions.CodeBuildAction(
                            action_name='Build',
                            project=NginxAppcodebuild,
                            input=sourceArtifact,
                            outputs=[buildArtifact])
                    ]),
                codepipeline.StageProps(
                    stage_name='Deploy',
                    actions=[
                        aws_codepipeline_actions.CodeDeployEcsDeployAction(
                            action_name='Deploy',
                            deployment_group=ecsDeploymentGroup,
                            app_spec_template_input=buildArtifact,
                            task_definition_template_input=buildArtifact,
                        )
                    ])
            ])

        # =============================================================================
        # Export the outputs
        # =============================================================================
        core.CfnOutput(self,
                       "ecsBlueGreenCodeRepo",
                       description="Demo app code commit repository",
                       export_name="ecsBlueGreenDemoAppRepo",
                       value=NginxCodeCommitrepo.repository_clone_url_http)

        core.CfnOutput(self,
                       "ecsBlueGreenLBDns",
                       description="Load balancer DNS",
                       export_name="ecsBlueGreenLBDns",
                       value=alb.load_balancer_dns_name)
# ---- Exemplo n.º 28 ----
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 **kwargs) -> None:
        """Provision a serverless image-recognition app.

        Wires together: two S3 buckets (originals + thumbnails) feeding a
        Rekognition Lambda via SQS, a DynamoDB table for detected labels,
        a Cognito user/identity pool for auth, a static-website bucket,
        and an API Gateway fronting a synchronous service Lambda.
        """
        super().__init__(scope, construct_id, **kwargs)
        # Image Bucket — holds the originals users upload
        image_bucket = s3.Bucket(self,
                                 IMG_BUCKET_NAME,
                                 removal_policy=cdk.RemovalPolicy.DESTROY)
        cdk.CfnOutput(self, "imageBucket", value=image_bucket.bucket_name)

        # NOTE(review): allowed_origins="*" permits any site to GET/PUT —
        # confirm this is acceptable for production.
        image_bucket.add_cors_rule(
            allowed_methods=[s3.HttpMethods.GET, s3.HttpMethods.PUT],
            allowed_origins=["*"],
            allowed_headers=["*"],
            max_age=3000,
        )

        # Thumbnail Bucket — resized copies written by the Rekognition Lambda
        resized_image_bucket = s3.Bucket(
            self,
            RESIZED_IMG_BUCKET_NAME,
            removal_policy=cdk.RemovalPolicy.DESTROY)
        cdk.CfnOutput(self,
                      "resizedBucket",
                      value=resized_image_bucket.bucket_name)

        resized_image_bucket.add_cors_rule(
            allowed_methods=[s3.HttpMethods.GET, s3.HttpMethods.PUT],
            allowed_origins=["*"],
            allowed_headers=["*"],
            max_age=3000,
        )
        # S3 Static bucket for website code
        web_bucket = s3.Bucket(
            self,
            WEBSITE_BUCKET_NAME,
            website_index_document="index.html",
            website_error_document="index.html",
            removal_policy=cdk.RemovalPolicy.DESTROY,
            # uncomment this and delete the policy statement below to allow public access to our
            # static website
            # public_read_access=true
        )

        # Restrict website reads to a single source IP (dev/demo lockdown)
        web_policy_statement = iam.PolicyStatement(
            actions=["s3:GetObject"],
            resources=[web_bucket.arn_for_objects("*")],
            principals=[iam.AnyPrincipal()],
            conditions={"IpAddress": {
                "aws:SourceIp": ["139.138.203.36"]
            }},
        )

        web_bucket.add_to_resource_policy(web_policy_statement)

        cdk.CfnOutput(self,
                      "bucketURL",
                      value=web_bucket.bucket_website_domain_name)

        # Deploy site contents to S3 Bucket
        s3_dep.BucketDeployment(
            self,
            "DeployWebsite",
            sources=[s3_dep.Source.asset("./public")],
            destination_bucket=web_bucket,
        )

        # DynamoDB to store image labels, keyed by image name
        partition_key = dynamodb.Attribute(name="image",
                                           type=dynamodb.AttributeType.STRING)
        table = dynamodb.Table(
            self,
            "ImageLabels",
            partition_key=partition_key,
            removal_policy=cdk.RemovalPolicy.DESTROY,
        )
        cdk.CfnOutput(self, "ddbTable", value=table.table_name)

        # Lambda layer for Pillow library
        layer = lb.LayerVersion(
            self,
            "pil",
            code=lb.Code.from_asset("reklayer"),
            compatible_runtimes=[lb.Runtime.PYTHON_3_7],
            license="Apache-2.0",
            description=
            "A layer to enable the PIL library in our Rekognition Lambda",
        )

        # Lambda function: detects labels and writes thumbnails
        rek_fn = lb.Function(
            self,
            "rekognitionFunction",
            code=lb.Code.from_asset("rekognitionFunction"),
            runtime=lb.Runtime.PYTHON_3_7,
            handler="index.handler",
            timeout=cdk.Duration.seconds(30),
            memory_size=1024,
            layers=[layer],
            environment={
                "TABLE": table.table_name,
                "BUCKET": image_bucket.bucket_name,
                "THUMBBUCKET": resized_image_bucket.bucket_name,
            },
        )

        # Least-privilege grants: read originals, write thumbnails + labels
        image_bucket.grant_read(rek_fn)
        resized_image_bucket.grant_write(rek_fn)
        table.grant_write_data(rek_fn)

        # DetectLabels has no resource-level scoping, hence "*"
        rek_fn.add_to_role_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                actions=["rekognition:DetectLabels"],
                                resources=["*"]))

        # Lambda for Synchronous front end (serves the API Gateway below)
        serviceFn = lb.Function(
            self,
            "serviceFunction",
            code=lb.Code.from_asset("servicelambda"),
            runtime=lb.Runtime.PYTHON_3_7,
            handler="index.handler",
            environment={
                "TABLE": table.table_name,
                "BUCKET": image_bucket.bucket_name,
                "RESIZEDBUCKET": resized_image_bucket.bucket_name,
            },
        )

        image_bucket.grant_write(serviceFn)
        resized_image_bucket.grant_write(serviceFn)
        table.grant_read_write_data(serviceFn)

        # Cognito User Pool Auth — self-signup with verified email
        auto_verified_attrs = cognito.AutoVerifiedAttrs(email=True)
        sign_in_aliases = cognito.SignInAliases(email=True, username=True)
        user_pool = cognito.UserPool(
            self,
            "UserPool",
            self_sign_up_enabled=True,
            auto_verify=auto_verified_attrs,
            sign_in_aliases=sign_in_aliases,
        )

        user_pool_client = cognito.UserPoolClient(self,
                                                  "UserPoolClient",
                                                  user_pool=user_pool,
                                                  generate_secret=False)

        identity_pool = cognito.CfnIdentityPool(
            self,
            "ImageRekognitionIdentityPool",
            allow_unauthenticated_identities=False,
            cognito_identity_providers=[{
                "clientId":
                user_pool_client.user_pool_client_id,
                "providerName":
                user_pool.user_pool_provider_name,
            }],
        )

        # API Gateway fronting serviceFn; proxy=False so routes/methods
        # are declared explicitly below
        cors_options = apigw.CorsOptions(allow_origins=apigw.Cors.ALL_ORIGINS,
                                         allow_methods=apigw.Cors.ALL_METHODS)
        api = apigw.LambdaRestApi(
            self,
            "imageAPI",
            default_cors_preflight_options=cors_options,
            handler=serviceFn,
            proxy=False,
        )

        # Cognito authorizer (L1 construct — attached to methods via the
        # property override further down)
        auth = apigw.CfnAuthorizer(
            self,
            "ApiGatewayAuthorizer",
            name="customer-authorizer",
            identity_source="method.request.header.Authorization",
            provider_arns=[user_pool.user_pool_arn],
            rest_api_id=api.rest_api_id,
            # type=apigw.AuthorizationType.COGNITO,
            type="COGNITO_USER_POOLS",
        )

        # Role assumable only by authenticated identities from our pool
        assumed_by = iam.FederatedPrincipal(
            "cognito-identity.amazon.com",
            conditions={
                "StringEquals": {
                    "cognito-identity.amazonaws.com:aud": identity_pool.ref
                },
                "ForAnyValue:StringLike": {
                    "cognito-identity.amazonaws.com:amr": "authenticated"
                },
            },
            assume_role_action="sts:AssumeRoleWithWebIdentity",
        )
        authenticated_role = iam.Role(
            self,
            "ImageRekognitionAuthenticatedRole",
            assumed_by=assumed_by,
        )
        # IAM policy granting users permission to get and put their pictures
        # (scoped to each user's private/<identity-id>/ prefix)
        policy_statement = iam.PolicyStatement(
            actions=["s3:GetObject", "s3:PutObject"],
            effect=iam.Effect.ALLOW,
            resources=[
                image_bucket.bucket_arn +
                "/private/${cognito-identity.amazonaws.com:sub}/*",
                image_bucket.bucket_arn +
                "/private/${cognito-identity.amazonaws.com:sub}/",
                resized_image_bucket.bucket_arn +
                "/private/${cognito-identity.amazonaws.com:sub}/*",
                resized_image_bucket.bucket_arn +
                "/private/${cognito-identity.amazonaws.com:sub}/",
            ],
        )

        # IAM policy granting users permission to list their pictures
        list_policy_statement = iam.PolicyStatement(
            actions=["s3:ListBucket"],
            effect=iam.Effect.ALLOW,
            resources=[
                image_bucket.bucket_arn, resized_image_bucket.bucket_arn
            ],
            conditions={
                "StringLike": {
                    "s3:prefix":
                    ["private/${cognito-identity.amazonaws.com:sub}/*"]
                }
            },
        )

        authenticated_role.add_to_policy(policy_statement)
        authenticated_role.add_to_policy(list_policy_statement)

        # Attach role to our Identity Pool
        cognito.CfnIdentityPoolRoleAttachment(
            self,
            "IdentityPoolRoleAttachment",
            identity_pool_id=identity_pool.ref,
            roles={"authenticated": authenticated_role.role_arn},
        )

        # Get some outputs from cognito
        cdk.CfnOutput(self, "UserPoolId", value=user_pool.user_pool_id)
        cdk.CfnOutput(self,
                      "AppClientId",
                      value=user_pool_client.user_pool_client_id)
        cdk.CfnOutput(self, "IdentityPoolId", value=identity_pool.ref)

        # New Amazon API Gateway with AWS Lambda Integration
        success_response = apigw.IntegrationResponse(
            status_code="200",
            response_parameters={
                "method.response.header.Access-Control-Allow-Origin": "'*'"
            },
        )
        # NOTE(review): the pattern below is a non-raw string, so "\n" is a
        # literal newline character inside the selection pattern — it still
        # matches any output including newlines, but confirm this was intended.
        error_response = apigw.IntegrationResponse(
            selection_pattern="(\n|.)+",
            status_code="500",
            response_parameters={
                "method.response.header.Access-Control-Allow-Origin": "'*'"
            },
        )

        # Map the `action` and `key` querystrings into the Lambda payload
        request_template = json.dumps({
            "action":
            "$util.escapeJavaScript($input.params('action'))",
            "key":
            "$util.escapeJavaScript($input.params('key'))",
        })

        lambda_integration = apigw.LambdaIntegration(
            serviceFn,
            proxy=False,
            request_parameters={
                "integration.request.querystring.action":
                "method.request.querystring.action",
                "integration.request.querystring.key":
                "method.request.querystring.key",
            },
            request_templates={"application/json": request_template},
            passthrough_behavior=apigw.PassthroughBehavior.WHEN_NO_TEMPLATES,
            integration_responses=[success_response, error_response],
        )

        imageAPI = api.root.add_resource("images")

        success_resp = apigw.MethodResponse(
            status_code="200",
            response_parameters={
                "method.response.header.Access-Control-Allow-Origin": True
            },
        )
        error_resp = apigw.MethodResponse(
            status_code="500",
            response_parameters={
                "method.response.header.Access-Control-Allow-Origin": True
            },
        )

        # GET /images
        get_method = imageAPI.add_method(
            "GET",
            lambda_integration,
            authorization_type=apigw.AuthorizationType.COGNITO,
            request_parameters={
                "method.request.querystring.action": True,
                "method.request.querystring.key": True,
            },
            method_responses=[success_resp, error_resp],
        )
        # DELETE /images
        delete_method = imageAPI.add_method(
            "DELETE",
            lambda_integration,
            authorization_type=apigw.AuthorizationType.COGNITO,
            request_parameters={
                "method.request.querystring.action": True,
                "method.request.querystring.key": True,
            },
            method_responses=[success_resp, error_resp],
        )

        # Override the authorizer id because it doesn't work when defining it as a param
        # in add_method
        get_method_resource = get_method.node.find_child("Resource")
        get_method_resource.add_property_override("AuthorizerId", auth.ref)
        delete_method_resource = delete_method.node.find_child("Resource")
        delete_method_resource.add_property_override("AuthorizerId", auth.ref)

        # Building SQS queue and DeadLetter Queue
        dl_queue = sqs.Queue(
            self,
            "ImageDLQueue",
            queue_name="ImageDLQueue",
        )

        # Messages that fail twice land in the DLQ
        dl_queue_opts = sqs.DeadLetterQueue(max_receive_count=2,
                                            queue=dl_queue)

        queue = sqs.Queue(
            self,
            "ImageQueue",
            queue_name="ImageQueue",
            visibility_timeout=cdk.Duration.seconds(30),
            receive_message_wait_time=cdk.Duration.seconds(20),
            dead_letter_queue=dl_queue_opts,
        )

        # S3 Bucket Create Notification to SQS
        # Whenever an image is uploaded add it to the queue
        # (only objects under the per-user private/ prefix)
        image_bucket.add_object_created_notification(
            s3n.SqsDestination(queue),
            s3.NotificationKeyFilter(prefix="private/"))
# ---- Exemplo n.º 29 ----
    def __init__(self, scope: Cdk.Construct, id: str, **kwargs) -> None:
        """Deploy the cross-region Custom Resource Proxy stack.

        Creates an inline-code Lambda function fronted by an SNS topic so
        that CloudFormation stacks in other regions/accounts can invoke
        custom resources hosted here, plus SSM parameters and stack
        outputs publishing both service tokens for consumers.

        :param scope: Parent construct.
        :param id: Logical ID of this construct.
        :param kwargs: Forwarded to the base construct (e.g. ``env``).
        """
        super().__init__(
            scope,
            id,
            description=("2.2.1 CloudFormation Custom Resource Lambda for "
                         "invoking cross-region Custom Resources"),
            **kwargs)

        # Role ARNs permitted to publish to the proxy topic (cross-account use).
        allowed_role_arns = Cdk.CfnParameter(
            self,
            "AllowedRoleArns",
            description=("Role ARNs to allow to publish to the SNS topic of "
                         "the Custom Resource Proxy to enable cross-account "
                         "use"),
            type="CommaDelimitedList")
        # Organization whose principals may reach the topic at all.
        organization_id = Cdk.CfnParameter(
            self,
            "OrganizationId",
            description=("Organization ID to use to allow access to the SNS "
                         "topic of the Custom Resource Proxy"),
            type="String")

        # Inline the handler source so the stack carries no asset dependency.
        path = os.path.join(os.path.dirname(__file__), "src/app.py")
        with open(path) as handler_file:
            code = Lambda.Code.from_inline(handler_file.read())

        function = Lambda.Function(
            self,
            "Function",
            code=code,
            function_name="LittleOrangeCustomResourceProxy",
            handler="index.handler",
            runtime=Lambda.Runtime.PYTHON_3_7,
            timeout=Cdk.Duration.seconds(30))

        topic = Sns.Topic(self,
                          "Topic",
                          topic_name="LittleOrangeCustomResourceProxy")

        # Only the listed roles, inside the organization, and only when the
        # call was made via CloudFormation, may publish to the topic.
        topic.add_to_resource_policy(statement=Iam.PolicyStatement(
            sid="OrganizationsCloudFormationAccess",
            actions=["sns:Publish"],
            conditions={
                "StringLike": {
                    "aws:PrincipalArn": allowed_role_arns.value_as_list
                },
                "StringEquals": {
                    "aws:CalledViaLast": "cloudformation.amazonaws.com",
                    "aws:PrincipalOrgID": organization_id.value_as_string
                }
            },
            effect=Iam.Effect.ALLOW,
            principals=[Iam.AnyPrincipal()],
            resources=[topic.topic_arn]))

        # Fan messages published to the topic into the Lambda handler.
        Sns.Subscription(self,
                         "TopicSubscription",
                         topic=topic,
                         endpoint=function.function_arn,
                         protocol=Sns.SubscriptionProtocol.LAMBDA)

        # CloudFormation may invoke the function directly (same-region use).
        function.add_permission(
            "CloudFormationPermission",
            principal=Iam.ServicePrincipal("cloudformation.amazonaws.com"),
            action="lambda:InvokeFunction")

        # SNS invokes the function for cross-region/cross-account requests.
        function.add_permission(
            "SNSPermission",
            principal=Iam.ServicePrincipal("sns.amazonaws.com"),
            action="lambda:InvokeFunction",
            source_arn=topic.topic_arn)

        # The proxy forwards to arbitrary target Lambdas, hence the broad
        # invoke grant.  NOTE(review): resources=["*"] is intentionally wide;
        # scope it down if the target ARNs become known.
        function.add_to_role_policy(
            Iam.PolicyStatement(actions=["lambda:InvokeFunction"],
                                effect=Iam.Effect.ALLOW,
                                resources=["*"]))

        # Publish both service tokens so consuming stacks can discover them.
        Ssm.StringParameter(
            self,
            "ServiceTokenParameter",
            parameter_name=(
                "/LittleOrange/CloudFormation/CustomResourceProxyServiceToken"
            ),
            description=("Lambda ARN for the Custom Resource Proxy "
                         "CloudFormation Custom Resource in this region"),
            type=Ssm.ParameterType.STRING,
            string_value=function.function_arn)

        Ssm.StringParameter(
            self,
            "SNSServiceTokenParameter",
            parameter_name=(
                "/LittleOrange/CloudFormation/CustomResourceProxySNSServiceToken"
            ),
            description=("SNS Topic ARN for the Custom Resource Proxy "
                         "CloudFormation Custom Resource in this region"),
            type=Ssm.ParameterType.STRING,
            string_value=topic.topic_arn)

        Cdk.CfnOutput(self, "ServiceToken", value=function.function_arn)
        Cdk.CfnOutput(self, "SNSServiceToken", value=topic.topic_arn)
# Exemplo n.º 30
# 0
    def __init__(self, scope: Construct, cid: str,
                 application_names: List[str], instance_ids: Dict[str,
                                                                  List[str]],
                 instance_roles_map: Dict[str, _iam.IRole],
                 endpoint_sg: _ec2.ISecurityGroup, vpc: _ec2.Vpc) -> None:
        """Create the VPC endpoints required for SSM-managed EC2 instances.

        Builds interface endpoints (ssm, ec2, ssmmessages, ec2messages,
        logs, monitoring) and an S3 gateway endpoint, each with an endpoint
        policy restricted to the assumed-role session principals of the
        given instances.

        :param scope: Parent construct.
        :param cid: Logical ID of this construct.
        :param application_names: Application keys used to look up entries
            in ``instance_ids`` and ``instance_roles_map``.
        :param instance_ids: Instance IDs per application name.
        :param instance_roles_map: Instance role per application name.
        :param endpoint_sg: Security group attached to each interface
            endpoint.
        :param vpc: VPC in which all endpoints are created.
        """

        super().__init__(scope, cid)
        principals = []

        # Build one assumed-role session ARN per (role, instance-id) pair so
        # the endpoint policies admit only the EC2 instances themselves,
        # not everyone who can assume the role.
        for application_name in application_names:
            for instance_id in instance_ids[application_name]:
                principals.append(
                    _iam.ArnPrincipal(
                        arn="arn:aws:sts::" + self.account + ":assumed-role/" +
                        instance_roles_map[application_name].role_name + "/" +
                        instance_id))

        # Core SSM agent operations (associations, documents, inventory,
        # parameters).  Actions apply to any resource; access is narrowed by
        # the principal list instead.
        self.create_interface_endpoint(
            "ssm",
            security_group=endpoint_sg,
            interface_endpoint_policy=_iam.PolicyStatement(
                effect=_iam.Effect.ALLOW,
                actions=[
                    "ssm:DescribeAssociation",
                    "ssm:GetDeployablePatchSnapshotForInstance",
                    "ssm:GetDocument", "ssm:DescribeDocument",
                    "ssm:GetManifest", "ssm:GetParameter", "ssm:GetParameters",
                    "ssm:ListAssociations", "ssm:ListInstanceAssociations",
                    "ssm:PutInventory", "ssm:PutComplianceItems",
                    "ssm:PutConfigurePackageResult",
                    "ssm:UpdateAssociationStatus",
                    "ssm:UpdateInstanceAssociationStatus",
                    "ssm:UpdateInstanceInformation"
                ],
                resources=["*"],
                principals=principals),
            vpc=vpc)
        # Read-only EC2 metadata lookups performed by the SSM agent.
        self.create_interface_endpoint(
            "ec2",
            security_group=endpoint_sg,
            interface_endpoint_policy=_iam.PolicyStatement(
                effect=_iam.Effect.ALLOW,
                actions=["ec2:Describe*"],
                resources=["*"],
                principals=principals),
            vpc=vpc)
        # Session Manager control/data channels.
        self.create_interface_endpoint(
            "ssmmessages",
            security_group=endpoint_sg,
            interface_endpoint_policy=_iam.PolicyStatement(
                effect=_iam.Effect.ALLOW,
                actions=[
                    "ssmmessages:CreateControlChannel",
                    "ssmmessages:CreateDataChannel",
                    "ssmmessages:OpenControlChannel",
                    "ssmmessages:OpenDataChannel"
                ],
                resources=["*"],
                principals=principals),
            vpc=vpc)
        # Run Command message exchange between agent and SSM service.
        self.create_interface_endpoint(
            "ec2messages",
            security_group=endpoint_sg,
            interface_endpoint_policy=_iam.PolicyStatement(
                effect=_iam.Effect.ALLOW,
                actions=[
                    "ec2messages:AcknowledgeMessage",
                    "ec2messages:DeleteMessage", "ec2messages:FailMessage",
                    "ec2messages:GetEndpoint", "ec2messages:GetMessages",
                    "ec2messages:SendReply"
                ],
                resources=["*"],
                principals=principals),
            vpc=vpc)
        # CloudWatch Logs delivery from the instances.
        self.create_interface_endpoint(
            "logs",
            security_group=endpoint_sg,
            interface_endpoint_policy=_iam.PolicyStatement(
                effect=_iam.Effect.ALLOW,
                actions=[
                    "logs:PutLogEvents", "logs:DescribeLogStreams",
                    "logs:DescribeLogGroups", "logs:CreateLogStream",
                    "logs:CreateLogGroup"
                ],
                resources=["*"],
                principals=principals),
            vpc=vpc)
        # CloudWatch metric publishing (e.g. from the CloudWatch agent).
        self.create_interface_endpoint(
            "monitoring",
            security_group=endpoint_sg,
            interface_endpoint_policy=_iam.PolicyStatement(
                effect=_iam.Effect.ALLOW,
                actions=["cloudwatch:PutMetricData"],
                resources=["*"],
                principals=principals),
            vpc=vpc)

        # S3 gateway endpoint restricted to the regional AWS-managed buckets
        # the SSM/CloudWatch agents download from, plus the project's own
        # Quick Start bucket taken from CDK context ("qs_s3_bucket").
        # NOTE(review): principals here are AnyPrincipal; the bucket list is
        # the effective restriction.
        self.create_gateway_endpoint(
            "s3",
            vpc=vpc,
            gateway_endpoint_policy=_iam.PolicyStatement(
                effect=_iam.Effect.ALLOW,
                actions=["s3:GetObject", "s3:PutObject"],
                resources=[
                    "arn:aws:s3:::aws-ssm-" + self.region + "/*",
                    "arn:aws:s3:::aws-windows-downloads-" + self.region + "/*",
                    "arn:aws:s3:::amazon-ssm-" + self.region + "/*",
                    "arn:aws:s3:::amazon-ssm-packages-" + self.region + "/*",
                    "arn:aws:s3:::" + self.region + "-birdwatcher-prod/*",
                    "arn:aws:s3:::aws-ssm-distributor-file-" + self.region +
                    "/*", "arn:aws:s3:::patch-baseline-snapshot-" +
                    self.region + "/*",
                    "arn:aws:s3:::amazoncloudwatch-agent-" + self.region +
                    "/*", "arn:aws:s3:::" +
                    self.node.try_get_context("qs_s3_bucket") + "-" +
                    self.region + "/*"
                ],
                principals=[_iam.AnyPrincipal()]))