Example #1
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        prj_name = self.node.try_get_context("project_name")
        env_name = self.node.try_get_context("env")

        account_id = core.Aws.ACCOUNT_ID

        # To store the frontend app

        FlaskFrontendBucket = s3.Bucket(self, 'FlaskFrontendWebsite',
            #access_control=s3.BucketAccessControl.BUCKET_OWNER_FULL_CONTROL,
            encryption=s3.BucketEncryption.S3_MANAGED,
            bucket_name=account_id+'-'+env_name+'-frontend',
            access_control=s3.BucketAccessControl.PUBLIC_READ,
            # block_public_access=s3.BlockPublicAccess(
            #     block_public_acls=True,
            #     block_public_policy=True,
            #     ignore_public_acls=True,
            #     restrict_public_buckets=True,
            # ),
            removal_policy=core.RemovalPolicy.DESTROY
        )
        
        policy_statement = iam.PolicyStatement(
            actions=["s3:GetObject"],
            resources=[f"{FlaskFrontendBucket.bucket_arn}/*"],
        )

        policy_statement.add_any_principal()

        static_site_policy_document = iam.PolicyDocument(
            statements=[policy_statement]
        )

        FlaskFrontendBucket.add_to_resource_policy(policy_statement)

        
        # The Origin Access Identity is a way to allow CloudFront
        # access to the website bucket.
        origin_access_identity = cfn.OriginAccessIdentity(
            self, "OriginAccessIdentity",
            comment="Allows Read-Access from CloudFront"
        )

        # We tell the website bucket to allow access from CloudFront
        FlaskFrontendBucket.grant_read(origin_access_identity)

        s3_deploy.BucketDeployment(self, "DeployFlaskFrontendWebsite",
            sources=[s3_deploy.Source.asset("./static-content")],
            destination_bucket=FlaskFrontendBucket,
           # destination_key_prefix=""
        )

        core.CfnOutput(self, 'S3FlaskFrontendExport',
            value=FlaskFrontendBucket.bucket_name,
            export_name='FlaskFrontendBucket'
        )
Example #2
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        prod_configs = self.node.try_get_context('envs')['prod']

        custom_vpc = _ec2.Vpc(
            self,
            "customVpcId",
            cidr=prod_configs['vpc_configs']['vpc_cidr'],
            max_azs=2,
            nat_gateways=1,
            subnet_configuration=[
                _ec2.SubnetConfiguration(
                    name="publicSubnet",
                    cidr_mask=prod_configs['vpc_configs']['cidr_mask'],
                    subnet_type=_ec2.SubnetType.PUBLIC),
                _ec2.SubnetConfiguration(
                    name="privateSubnet",
                    cidr_mask=prod_configs['vpc_configs']['cidr_mask'],
                    subnet_type=_ec2.SubnetType.PRIVATE),
                _ec2.SubnetConfiguration(
                    name="dbSubnet",
                    cidr_mask=prod_configs['vpc_configs']['cidr_mask'],
                    subnet_type=_ec2.SubnetType.ISOLATED)
            ])

        core.Tag.add(custom_vpc, "Owner", "Mystique")

        core.CfnOutput(self,
                       "customVpcOutput",
                       value=custom_vpc.vpc_id,
                       export_name="customVpcId")

        my_bkt = _s3.Bucket(self, "custombktId")

        core.Tag.add(my_bkt, "Owner", "Mystique")

        # Resource in same account.
        bkt1 = _s3.Bucket.from_bucket_name(self, "MyImportedBuket",
                                           "sample-bkt-cdk-010")

        bkt2 = _s3.Bucket.from_bucket_arn(self, "crossAccountBucket",
                                          "arn:aws:s3:::SAMPLE-CROSS-BUCKET")

        core.CfnOutput(self, "myimportedbucket", value=bkt1.bucket_name)

        vpc2 = _ec2.Vpc.from_lookup(
            self,
            "importedVPC",
            # is_default=True,
            vpc_id="vpc-d0a193aa")

        core.CfnOutput(self, "importedVpc2", value=vpc2.vpc_id)

        peer_vpc = _ec2.CfnVPCPeeringConnection(self,
                                                "peerVpc12",
                                                peer_vpc_id=custom_vpc.vpc_id,
                                                vpc_id=vpc2.vpc_id)
Example #3
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        blue_env = self.node.try_get_context("blue_env")
        green_env = self.node.try_get_context("green_env")
        app_name = self.node.try_get_context("app_name")

        bucket = s3.Bucket(
            self,
            'BlueGreenBucket',
            # The default removal policy is RETAIN, which means that cdk
            # destroy will not attempt to delete the new bucket, and it will
            # remain in your account until manually deleted. By setting the
            # policy to DESTROY, cdk destroy will attempt to delete the bucket,
            # but will error if the bucket is not empty.
            removal_policy=core.RemovalPolicy.DESTROY
            # NOT recommended for production code
        )
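        # (If your CDK version supports it, auto_delete_objects=True empties
        # the bucket on delete so `cdk destroy` can succeed; see Example #19.)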

        handler = lmbda.Function(self,
                                 'BlueGreenLambda',
                                 runtime=lmbda.Runtime.PYTHON_3_6,
                                 code=lmbda.Code.asset('resources'),
                                 handler='blue_green.lambda_handler',
                                 environment={'BUCKET': bucket.bucket_name})

        bucket.grant_read_write(handler)

        repo = cc.Repository(
            self,
            'Repository',
            repository_name='MyRepositoryName',
        )

        pipeline = cp.Pipeline(self, 'MyFirstPipeline')

        source_stage = pipeline.add_stage(stage_name='Source')

        source_artifact = cp.Artifact('Source')

        source_action = cpactions.CodeCommitSourceAction(
            action_name='CodeCommit', repository=repo, output=source_artifact)

        source_stage.add_action(source_action)

        deploy_stage = pipeline.add_stage(stage_name='Deploy')

        lambda_action = cpactions.LambdaInvokeAction(
            action_name='InvokeAction',
            lambda_=handler,
            user_parameters={
                'blueEnvironment': blue_env,
                'greenEnvironment': green_env,
                'application': app_name
            },
            inputs=[source_artifact])

        deploy_stage.add_action(lambda_action)
Example #4
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Add your stack resources below:
        # Create an S3 Bucket for storing our web store assets
        kk_store = _s3.Bucket(self, "kkStore", versioned=True)

        # DynamoDB Table
        kk_store_assets_table = _dynamodb.Table(
            self,
            "kkStoreAssetsDDBTable",
            table_name="kk_store_assets_tables",
            partition_key=_dynamodb.Attribute(
                name="_id", type=_dynamodb.AttributeType.STRING),
            removal_policy=core.RemovalPolicy.DESTROY)

        # Read Lambda Code
        try:
            with open("advanced_use_cases/lambda_src/s3_event_processor.py",
                      mode="r") as f:
                kk_store_processor_fn_code = f.read()
        except OSError:
            print("Unable to read Lambda Function Code")
            raise

        # Deploy the lambda function
        kk_store_processor_fn = _lambda.Function(
            self,
            "kkStoreProcessorFn",
            function_name="kk_store_processor_fn",
            description="Process store events and update DDB",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="index.lambda_handler",
            code=_lambda.InlineCode(kk_store_processor_fn_code),
            timeout=core.Duration.seconds(3),
            reserved_concurrent_executions=1,
            environment={
                "LOG_LEVEL": "INFO",
                "DDB_TABLE_NAME": f"{kk_store_assets_table.table_name}"
            })

        # Add DynamoDB Write Privileges To Lambda
        kk_store_assets_table.grant_read_write_data(kk_store_processor_fn)

        # Create Custom Loggroup
        kk_store_lg = _logs.LogGroup(
            self,
            "kkStoreLogGroup",
            log_group_name=f"/aws/lambda/{kk_store_processor_fn.function_name}",
            removal_policy=core.RemovalPolicy.DESTROY,
            retention=_logs.RetentionDays.ONE_DAY)

        # Create s3 notification for lambda function
        kk_store_backend = _s3_notifications.LambdaDestination(
            kk_store_processor_fn)

        # Assign notification for the s3 event type (ex: OBJECT_CREATED)
        kk_store.add_event_notification(_s3.EventType.OBJECT_CREATED,
                                        kk_store_backend)
Example #5
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        s3_bucket = aws_s3.Bucket(self,
                                  id='qa-app-client',
                                  bucket_name='qa-app-client',
                                  public_read_access=True,
                                  website_error_document='index.html',
                                  website_index_document='index.html')
Example #6
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        bucket = aws_s3.Bucket(
                    self,
                    "mjls3Bucket1992323",
                    public_read_access=True
                )
Example #7
    def __init__(self, scope: core.Construct, id: builtins.str,
                 landing_zone: IVpcLandingZone) -> None:
        super().__init__(scope, id)
        self.landing_zone = landing_zone

        # Setup DNS...
        self.trader_dns_zone = r53.PrivateHostedZone(
            self,
            'Trader',
            zone_name='trader.fsi',
            vpc=landing_zone.vpc,
            comment='HomeNet Financial Services Domain')

        # Create a key and delegate access to IAM...
        self.key = kms.Key(
            self,
            'Key',
            alias='homenet/fsi',
            enable_key_rotation=True,
            policy=iam.PolicyDocument(statements=[
                iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                    principals=[
                                        iam.AccountPrincipal(
                                            core.Stack.of(self).account)
                                    ],
                                    actions=['kms:*'],
                                    resources=['*'])
            ]))

        # Create central resources...
        self.tda_secret = sm.Secret(
            self,
            'AmeritradeSecrets',
            removal_policy=core.RemovalPolicy.DESTROY,
            secret_name='HomeNet-{}-Ameritrade-Secrets'.format(
                self.landing_zone.zone_name))

        self.bucket = s3.Bucket(self,
                                'Bucket',
                                bucket_name='homenet-{}.{}.trader.fsi'.format(
                                    self.landing_zone.zone_name,
                                    core.Stack.of(self).region).lower(),
                                versioned=True)

        r53.ARecord(self,
                    'BucketAlias',
                    zone=self.trader_dns_zone,
                    record_name=self.bucket.bucket_domain_name,
                    target=r53.RecordTarget.from_alias(
                        dns_targets.BucketWebsiteTarget(self.bucket)))

        # self.fspace = space.CfnEnvironment(self,'Finspace',
        #   name='HomeNet-FsiCoreSvc',
        #   kms_key_id= self.key.key_id,
        #   description="HomeNet Financial Servicing Catalog")
        self.finspace = FinSpaceEnvironment()
        self.key.grant_admin(iam.ServicePrincipal(service='finspace'))
Example #8
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)
        name = "bucket-belisco-turma-5-cdk"
        s3.Bucket(self,
                  id=name,
                  bucket_name=name,
                  block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
                  encryption=s3.BucketEncryption.S3_MANAGED)
Example #9
    def __init__(self, scope: core.Construct, construct_id: str,
                 bucket: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # The code that defines your stack goes here
        s3.Bucket(self, 'bucket-id',
                  bucket_name='demo-' + bucket + '-865')
Example #10
    def create_asset_bucket(self):
        """
        Create S3 Bucket for assets
        """
        return s3.Bucket(
            self,
            self.config.get("stack_name") + "_s3",
            removal_policy=core.RemovalPolicy.DESTROY
        )
Example #11
    def create_redirect_bucket(self):
        return s3.Bucket(
            self,
            self.config.get("stack_name") + "_redirect",
            website_redirect=s3.RedirectTarget(
                host_name=self.sub_domain,
                protocol=s3.RedirectProtocol.HTTPS),
            removal_policy=core.RemovalPolicy.DESTROY,
            public_read_access=True
        )
Example #12
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # The code that defines your stack goes here
        _s3.Bucket(
            self,
            "myBucketID",
        )
Example #13
    def __init__(self, scope: core.Construct, id: str, bucket_name: str, **kwargs) -> None:
        """
        deploys AWS S3 bucket
            Resources:
                AWS::S3::Bucket with your details
        """
        super().__init__(scope, id, **kwargs)

        s3.Bucket(self, id, bucket_name=bucket_name)
Example #14
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        bucket = s3.Bucket(scope=self,
                           id="MyFirstBucket",
                           public_read_access=True,
                           removal_policy=cdk.RemovalPolicy.DESTROY,
                           versioned=True)
Example #15
    def __init__(self, scope: cdk.Construct, id: str, **kwargs):
        super().__init__(scope, id, **kwargs)

        bucket = s3.Bucket(self,
                           "Bucket",
                           removal_policy=cdk.RemovalPolicy.DESTROY)


# npm add @mobileposse/auto-delete-bucket # to empty contents first
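# Alternatively, recent CDK releases can empty the bucket without a
# third-party construct: pass auto_delete_objects=True alongside
# RemovalPolicy.DESTROY (Example #19 below does exactly this).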
Example #16
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here
        _s3.Bucket(self,
                   "myBucketId",
                   bucket_name="cdk-project6676",
                   versioned=True,
                   encryption=_s3.BucketEncryption.KMS_MANAGED)
Example #17
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create S3 bucket here.
        bucket = aws_s3.Bucket(self, image_bucket_name)

        # CFN output for S3 bucket creation.
        core.CfnOutput(
            self,
            "image-bucket",
            value=bucket.bucket_name,
            description="Bucket for uploading images",
        )

        # Create dynamodb table for storing image labels here.
        table = aws_dynamodb.Table(
            self,
            "image-lables",
            partition_key=aws_dynamodb.Attribute(
                name="image", type=aws_dynamodb.AttributeType.STRING
            ),
        )

        # CFN output for dynamodb table creation.
        core.CfnOutput(
            self,
            "image-lables-ddb-table",
            value=table.table_name,
            description="DDB table for storing image lables.",
        )

        function = aws_lambda.Function(
            self,
            "rekognitionFunction",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            handler="handler.main",
            code=aws_lambda.Code.asset("./rekognitionLambda"),
            timeout=core.Duration.seconds(30),
            memory_size=1024,
        )
        function.add_environment("TABLE", table.table_name)
        function.add_environment("BUCKET", bucket.bucket_name)
        function.add_event_source(
            aws_lambda_event_sources.S3EventSource(
                bucket=bucket, events=[aws_s3.EventType.OBJECT_CREATED]
            )
        )
        bucket.grant_read(function)
        table.grant_write_data(function)

        function.add_to_role_policy(
            statement=aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                actions=["rekognition:DetectLabels"],
                resources=["*"],
            )
        )
Example #18
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        _s3.Bucket(self,
                   "myBucketID",
                   bucket_name="myfirstcdkprojectbkt1004",
                   versioned=False,
                   encryption=_s3.BucketEncryption.S3_MANAGED,
                   block_public_access=_s3.BlockPublicAccess.BLOCK_ALL)

        mybucket = _s3.Bucket(self, "myBucketId1")
        print(mybucket.bucket_name)
        output_1 = core.CfnOutput(self,
                                  "myBucketOutput1",
                                  value=mybucket.bucket_name,
                                  description=f"My first cdk bucket",
                                  export_name="myBucketOutput1")
Example #19
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        bucket = s3.Bucket(self,
                           "MyFirstBucket",
                           versioned=True,
                           removal_policy=core.RemovalPolicy.DESTROY,
                           auto_delete_objects=True)
Example #20
    def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)
        
        #template = cfn_inc.CfnInclude(self, id='Template', template_file='template.yaml')
        # The code that defines your stack goes here
        bucket_names = 'config-1' + str(core.Aws.ACCOUNT_ID)
        sns_topic = _sns.Topic(self, id='topic-config', topic_name='config-topic')
        sns_topic.add_subscription(subscriptions.EmailSubscription("*****@*****.**"))
        bucket = s3.Bucket(self, id='s3cdkbuckets', bucket_name=bucket_names, versioned=True)
        bucket_arn2 = str(bucket.bucket_arn) + "/AWSLogs/" + str(core.Aws.ACCOUNT_ID) + "/Config/*"
        bucket_policy = bucket.add_to_resource_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                resources=[bucket.bucket_arn],
                actions=["s3:GetBucketAcl"],
                sid="AWSConfigBucketPermissionsCheck",
                principals=[iam.ServicePrincipal("config.amazonaws.com")]))
        bucket_policy2 = bucket.add_to_resource_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                resources=[bucket_arn2],
                actions=["s3:PutObject"],
                sid="AWSConfigBucketDelivery",
                principals=[iam.ServicePrincipal("config.amazonaws.com")],
                conditions={
                    "StringEquals": {
                        "s3:x-amz-acl": "bucket-owner-full-control"}}))
        recorder = config.CfnConfigurationRecorder(self,
                id='recorder',
                role_arn='arn:aws:iam::306646308112:role/aws-service-role/config.amazonaws.com/AWSServiceRoleForConfig',
                recording_group=None)
        channel = config.CfnDeliveryChannel(self,
                id='channel',
                s3_bucket_name=bucket.bucket_name,
                sns_topic_arn=sns_topic.topic_arn)
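        # Note: time.sleep only pauses template synthesis; it does not delay
        # deployment ordering (the add_depends_on calls below handle that).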
        time.sleep(20)
        srule = config.CfnConfigRule(self,
                id='rule1',
                source=config.CfnConfigRule.SourceProperty(owner="AWS",
                    source_identifier="REQUIRED_TAGS"),  
                input_parameters={"tag1Key":"tagVal"})
        srule2 = config.CfnConfigRule(self, id='rule2',
                 source=config.CfnConfigRule.SourceProperty(owner="AWS",
                    source_identifier="S3_BUCKET_LEVEL_PUBLIC_ACCESS_PROHIBITED"))
        srule3 = config.CfnConfigRule(self, id='rule3',
                 source=config.CfnConfigRule.SourceProperty(owner="AWS",
                    source_identifier="VPC_SG_OPEN_ONLY_TO_AUTHORIZED_PORTS"))
        srule.add_depends_on(recorder)
        srule2.add_depends_on(recorder)
        srule3.add_depends_on(recorder)
        event_rule = _events.Rule(self, id='event_rule', event_pattern={
            "source": ["aws.config"],
            "detail": {
                "messageType": ["ConfigurationItemChangeNotification"],
                "newEvaluationResult": {
                    "compliance_type": ["NON_COMPLIANT"]
                }
            }
        })
        event_rule.add_target(targets.SnsTopic(sns_topic))
Example #21
    def __init__(self, scope: core.Construct, _id: str, **kwargs) -> None:
        super().__init__(scope, _id, **kwargs)

        ddb_file_list = ddb.Table(self, "ddb",
                                  partition_key=ddb.Attribute(name="Key", type=ddb.AttributeType.STRING),
                                  billing_mode=ddb.BillingMode.PAY_PER_REQUEST)

        sqs_queue_DLQ = sqs.Queue(self, "sqs_DLQ",
                                  visibility_timeout=core.Duration.minutes(15),
                                  retention_period=core.Duration.days(14)
                                  )
        sqs_queue = sqs.Queue(self, "sqs_queue",
                              visibility_timeout=core.Duration.minutes(15),
                              retention_period=core.Duration.days(14),
                              dead_letter_queue=sqs.DeadLetterQueue(
                                  max_receive_count=100,
                                  queue=sqs_queue_DLQ
                              )
                              )
        handler = lam.Function(self, "lambdaFunction",
                               code=lam.Code.asset("./lambda"),
                               handler="lambda_function.lambda_handler",
                               runtime=lam.Runtime.PYTHON_3_8,
                               memory_size=1024,
                               timeout=core.Duration.minutes(15),
                               environment={
                                   'table_name': ddb_file_list.table_name,
                                   'queue_name': sqs_queue.queue_name,
                                   'Des_bucket_default': Des_bucket_default,
                                   'Des_prefix_default': Des_prefix_default,
                                   'Des_region': Des_region,
                                   'StorageClass': StorageClass,
                                   'aws_access_key_id': aws_access_key_id,
                                   'aws_secret_access_key': aws_secret_access_key
                               })
        ddb_file_list.grant_read_write_data(handler)
        handler.add_event_source(SqsEventSource(sqs_queue))

        s3bucket = s3.Bucket(self, "s3bucket")
        s3bucket.grant_read(handler)
        s3bucket.add_event_notification(s3.EventType.OBJECT_CREATED,
                                        s3n.SqsDestination(sqs_queue))

        # You can import an existing bucket and grant access to lambda
        # exist_s3bucket = s3.Bucket.from_bucket_name(self, "import_bucket",
        #                                             bucket_name="your_bucket_name")
        # exist_s3bucket.grant_read(handler)

        # But you have to add the SQS event notification to the imported bucket
        # manually; CloudFormation doesn't support it. A workaround is to use
        # on_cloud_trail_event for the bucket, but that requires CloudTrail to
        # be set up first.
        # (Because the bucket is imported, you must manually create the bucket
        # event trigger to SQS and set the SQS permission allowing that bucket
        # to invoke it.)
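        # A minimal, illustrative sketch of that workaround -- it assumes a
        # CloudTrail trail is already recording S3 data events for the bucket,
        # and that aws_cdk.aws_events_targets is imported as targets:
        # exist_s3bucket.on_cloud_trail_event(
        #     "importedBucketEvent",
        #     target=targets.SqsQueue(sqs_queue))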

        core.CfnOutput(self, "DynamoDB_Table", value=ddb_file_list.table_name)
        core.CfnOutput(self, "SQS_Job_Queue", value=sqs_queue.queue_name)
        core.CfnOutput(self, "SQS_Job_Queue_DLQ", value=sqs_queue_DLQ.queue_name)
        core.CfnOutput(self, "Worker_Lambda_Function", value=handler.function_name)
        core.CfnOutput(self, "New_S3_Bucket", value=s3bucket.bucket_name)
Example #22
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        bucket = s3.Bucket(
            self,
            "MyFirstBucket",
            versioned=True,
        )
Example #23
    def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        prj_name = self.node.try_get_context("project_name")
        env_name = self.node.try_get_context("env")
        account_id = core.Aws.ACCOUNT_ID
        PROJECT_NUMBER = 2

        # To Store Frontend App
        frontend_bucket = s3.Bucket(self, "frontend",
                                    access_control=s3.BucketAccessControl.BUCKET_OWNER_FULL_CONTROL,
                                    bucket_name=account_id + '-' + env_name + '-frontend',
                                    public_read_access=True,
                                    removal_policy=core.RemovalPolicy.DESTROY,
                                    website_index_document='index.html'
                                    )

        bucket_name = frontend_bucket.bucket_name

        github_token = core.SecretValue.secrets_manager("dev/github-token", json_field='github-from-marsApp')

        cb.GitHubSourceCredentials(self, "CodeBuildGitHubCreds",
                                          access_token=github_token
                                          )

        git_hub_source = cb.Source.git_hub(
            owner="manrodri",
            repo="30miniProjects",
            webhook=True,
            webhook_filters=[
                cb.FilterGroup.in_event_of(cb.EventAction.PUSH).and_branch_is(
                    "master").and_file_path_is('js30Projects/')
            ]
        )

        codebuild_project = cb.Project(
            self,
            "cb-frontend",
            source=git_hub_source,
            environment=cb.BuildEnvironment(
                build_image=cb.LinuxBuildImage.STANDARD_3_0,
                environment_variables={
                    'WEB_BUCKET_NAME': cb.BuildEnvironmentVariable(value=bucket_name),
                    'PROJECT_NUMBER': cb.BuildEnvironmentVariable(value=str(PROJECT_NUMBER))
                }
            ),
        )

        allow_object_actions = iam.PolicyStatement(
            resources=[f"arn:aws:s3:::{bucket_name}/*"],
            actions=["s3:*"])
        allow_bucket_actions = iam.PolicyStatement(
            resources=[f"arn:aws:s3:::{bucket_name}"],
            actions=['s3:*'],
        )
        codebuild_project.add_to_role_policy(allow_object_actions)
        codebuild_project.add_to_role_policy(allow_bucket_actions)
Example #24
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        bucket = s3.Bucket(self, "Bucket", website_index_document="index.html")

        config = {"comment": "mythical-mysfits"}

        origin = cloudfront.CfnCloudFrontOriginAccessIdentity(
            self,
            "BucketOrigin",
            cloud_front_origin_access_identity_config=config)

        identity = iam.CanonicalUserPrincipal(
            canonical_user_id=origin.attr_s3_canonical_user_id)

        bucket.grant_read(identity)

        cloudfront_behaviour = cloudfront.Behavior(
            max_ttl=core.Duration.seconds(60),
            allowed_methods=cloudfront.CloudFrontAllowedMethods.GET_HEAD_OPTIONS,
            is_default_behavior=True)
        cloudfront_distribution = cloudfront.CloudFrontWebDistribution(
            self,
            "CloudFront",
            viewer_protocol_policy=cloudfront.ViewerProtocolPolicy.ALLOW_ALL,
            price_class=cloudfront.PriceClass.PRICE_CLASS_ALL,
            origin_configs=[
                cloudfront.SourceConfiguration(
                    behaviors=[cloudfront_behaviour],
                    origin_path="/web",
                    s3_origin_source=cloudfront.S3OriginConfig(
                        s3_bucket_source=bucket,
                        origin_access_identity_id=origin.ref),
                )
            ],
        )

        contentDir = os.path.realpath("../web/")
        source = deployment.Source.asset(contentDir)
        deployment.BucketDeployment(
            self,
            "DeployWebsite",
            sources=[source],
            destination_key_prefix="web/",
            destination_bucket=bucket,
            distribution=cloudfront_distribution,
            retain_on_delete=False,
        )

        core.CfnOutput(
            self,
            "CloudFrontURL",
            description="The CloudFront distribution URL",
            value="http://{}".format(cloudfront_distribution.domain_name),
        )
Example #25
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # create s3 bucket for athena data
        self.s3_bucket = s3.Bucket(
            self,
            "s3_bucket",
            public_read_access=False,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
            removal_policy=core.RemovalPolicy.DESTROY,
            lifecycle_rules=[
                s3.LifecycleRule(
                    # delete the files after 1800 days (~5 years)
                    expiration=core.Duration.days(1800),
                    transitions=[
                        # move files into glacier after 90 days
                        s3.Transition(
                            transition_after=core.Duration.days(90),
                            storage_class=s3.StorageClass.GLACIER,
                        )
                    ],
                )
            ],
        )
        # tag the bucket
        core.Tag.add(self.s3_bucket, "project", constants["PROJECT_TAG"])

        # lambda policies
        athena_bucket_empty_policy = [
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=["s3:ListBucket"],
                resources=["*"],
            ),
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "s3:DeleteObject",
                ],
                resources=[f"{self.s3_bucket.bucket_arn}/*"],
            ),
        ]

        # create the custom resource
        athena_bucket_empty = CustomResource(
            self,
            "athena_bucket_empty",
            PhysicalId="athenaBucketEmpty",
            Description="Empty athena s3 bucket",
            Uuid="f7d4f730-4ee1-11e8-9c2d-fa7ae01bbebc",
            HandlerPath=os.path.join(dirname, "../helpers/s3_bucket_empty.py"),
            BucketName=self.s3_bucket.bucket_name,
            ResourcePolicies=athena_bucket_empty_policy,
        )
        # needs a dependency
        athena_bucket_empty.node.add_dependency(self.s3_bucket)
Example #26
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        self.table_name = 'globaldatatest.global.table'
        executelambda = aws_lambda.Function(
            self,
            id='execute',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            handler='execute.lambda_handler',
            code=aws_lambda.Code.asset('lambda'))

        statsLambda = aws_lambda.Function(
            self,
            id='stats',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            handler='stats.lambda_handler',
            code=aws_lambda.Code.asset('lambda'))

        resetLambda = aws_lambda.Function(
            self,
            id='reset',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            handler='reset.lambda_handler',
            code=aws_lambda.Code.asset('lambda'))

        my_bucket = s3.Bucket(self, id='s3buckset3', bucket_name='s3mybucket')
        table = aws_dynamodb.Table(self,
                                   'table3',
                                   partition_key={
                                       'name': 'key',
                                       'type':
                                       aws_dynamodb.AttributeType.STRING
                                   },
                                   table_name='commands')
        api = apigateway.RestApi(
            self,
            "web-shell-apii",
            rest_api_name="Web Shell",
            description="This service serves shell commands.")
        executeIntegration = apigateway.LambdaIntegration(executelambda)
        statsIntegration = apigateway.LambdaIntegration(statsLambda)
        resetIntegration = apigateway.LambdaIntegration(resetLambda)

        executeResource = api.root.add_resource('execute')
        executeResource.add_method("POST",
                                   executeIntegration,
                                   api_key_required=True)

        statsResource = api.root.add_resource('stats')
        statsResource.add_method("GET",
                                 statsIntegration,
                                 api_key_required=True)

        resetResource = api.root.add_resource('reset')
        resetResource.add_method("PUT",
                                 resetIntegration,
                                 api_key_required=True)
Example #27
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create an S3 Bucket:
        static_site_assets_bkt = _s3.Bucket(
            self,
            "assetsBucket",
            versioned=True,
            # public_read_access=True,
            # website_index_document="index.html",
            # website_error_document="404.html",
            removal_policy=core.RemovalPolicy.DESTROY)

        # Add assets to static site bucket
        add_assets_to_site = _s3_deployment.BucketDeployment(
            self,
            "deployStaticSiteAssets",
            sources=[
                _s3_deployment.Source.asset("advanced_use_cases/static_assets")
            ],
            destination_bucket=static_site_assets_bkt)

        # Create OAI for Cloudfront
        static_site_oai = _cloudfront.OriginAccessIdentity(
            self,
            "staticSiteOai",
            comment=f"OAI for static site from stack:{core.Aws.STACK_NAME}")

        # Deploy Cloudfront Configuration: Connecting OAI with static asset bucket
        cf_source_configuration = _cloudfront.SourceConfiguration(
            s3_origin_source=_cloudfront.S3OriginConfig(
                s3_bucket_source=static_site_assets_bkt,
                origin_access_identity=static_site_oai),
            behaviors=[
                _cloudfront.Behavior(
                    is_default_behavior=True,
                    compress=True,
                    allowed_methods=_cloudfront.CloudFrontAllowedMethods.ALL,
                    cached_methods=_cloudfront.CloudFrontAllowedCachedMethods.GET_HEAD)
            ])

        # Create Cloudfront Distribution
        static_site_distribution = _cloudfront.CloudFrontWebDistribution(
            self,
            "staticSiteCfDistribution",
            comment="CDN for static website",
            origin_configs=[cf_source_configuration],
            price_class=_cloudfront.PriceClass.PRICE_CLASS_100)

        # Output Cloudfront Url
        output_1 = core.CfnOutput(
            self,
            "CloudfrontUrl",
            value=f"{static_site_distribution.domain_name}",
            description="The domain name of the static site")
Example #28
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # S3 Bucket to store files
        myBucket = aws_s3.Bucket(self,
                                 "S3Bucket",
                                 bucket_name="new-app-bucket-example"
                                 )

        # Lambda triggered periodically
        myScheduledLambda = _lambda.Function(
            self,
            "scheduled_lambda",
            description="Lambda that will be called according to a schedule",
            function_name="scheduledLambda",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="scheduled_lambda.handler",
            code=_lambda.Code.from_asset("../functions"),
            memory_size=128,  # Memory in mb
            retry_attempts=2,
            environment={"S3_BUCKET": myBucket.bucket_name}
        )
        # Give Lambda access to S3 bucket
        myBucket.grant_read(myScheduledLambda)
        myBucket.grant_put(myScheduledLambda)

        # Create cron Rule for Lambda
        myScheduledLambdaEvent = aws_events.Rule(
            self,
            "scheduled_lambda_event",
            schedule=aws_events.Schedule.rate(core.Duration.minutes(2)),
            enabled=True,
            targets=[aws_events_targets.LambdaFunction(handler=myScheduledLambda)]
        )

        # API Gateway Proxy Lambda definition
        myEndpointLambda = _lambda.Function(
            self,
            "endpoint_lambda",
            description="Lambda Proxy with Api Gateway",
            function_name="endpointLambda",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="endpoint_lambda.handler",
            code=_lambda.Code.from_asset("../functions"),
            memory_size=128,  # Memory in mb
            retry_attempts=2,
            environment={"S3_BUCKET": myBucket.bucket_name}
        )
        # Give lambda access to S3 Bucket
        myBucket.grant_read(myEndpointLambda)
        myBucket.grant_put(myEndpointLambda)

        # Define API Gateway endpoints
        api = aws_apigateway.RestApi(self, "ApiEndpoint")
        api.root.resource_for_path("/test").add_method("GET",
                                                       aws_apigateway.LambdaIntegration(handler=myEndpointLambda))
Example #29
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        bucket = s3.Bucket(
            self,
            "MyFirstBucket",
            bucket_name="aws-wide-unique-name-CHANGE-THIS",
            versioned=True,
            removal_policy=core.RemovalPolicy.DESTROY,
        )
Example #30
    def __init__(self, app: core.App, id: str, **kwargs) -> None:
        super().__init__(app, id, **kwargs)

        # The code that defines your stack goes here
        bucket = s3.Bucket(
            self,
            "MyFirstBucket",
            versioned=True,
            encryption=s3.BucketEncryption.KMS_MANAGED,
        )