Example #1
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Import an existing S3 bucket by its attributes
        konstone_bkt = _s3.Bucket.from_bucket_attributes(self,
                                                         "konstoneAssetsBucket",
                                                         bucket_name="konstone-assets-bkt")

        # Create Lambda function with source code from S3 Bucket
        konstone_fn = _lambda.Function(self,
                                       "konstoneFunction",
                                       function_name="konstone_fn",
                                       runtime=_lambda.Runtime.PYTHON_3_7,
                                       handler="konstone_processor.lambda_handler",
                                       code=_lambda.S3Code(
                                           bucket=konstone_bkt,
                                           key="lambda_src/konstone_processor.zip"
                                       ),
                                       timeout=core.Duration.seconds(2),
                                       reserved_concurrent_executions=1
                                       )

        # Create a custom log group at /aws/lambda/<function-name>;
        # RemovalPolicy.DESTROY removes the log group when the stack is deleted
        konstone_lg = _logs.LogGroup(self,
                                     "konstoneLoggroup",
                                     log_group_name=f"/aws/lambda/{konstone_fn.function_name}",
                                     removal_policy=core.RemovalPolicy.DESTROY,
                                     retention=_logs.RetentionDays.ONE_WEEK
                                     )
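These snippets assume the AWS CDK v1 Python packages. A minimal sketch of the imports the aliases above imply; the module names are standard CDK v1 and the aliases mirror the snippet:

    # Assumed CDK v1 imports matching the aliases used above
    from aws_cdk import core
    from aws_cdk import aws_s3 as _s3
    from aws_cdk import aws_lambda as _lambda
    from aws_cdk import aws_logs as _logs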
Example #2
    def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # import the existing S3 source bucket
        test_s3 = aws_s3.Bucket.from_bucket_attributes(
            self,
            "tests3",
            bucket_name="manuja-test1"
        )

        #lambda
        test_func01 = aws_lambda.Function(
            self,
            "testlambda",
            function_name="lambdaFromS3",
            runtime=aws_lambda.Runtime.PYTHON_3_6,
            handler="function.lambda_handler",
            code=aws_lambda.S3Code(
                bucket=test_s3,
                key="lambda/function.zip"
            ),
            timeout=core.Duration.seconds(5),
            reserved_concurrent_executions=1
        )

        # attach a CloudWatch log group to the function
        log_group01 = aws_logs.LogGroup(
            self,
            "cloudwatchlog01",
            log_group_name=f"/aws/lambda/{test_func01.function_name}",
            retention=aws_logs.RetentionDays.ONE_DAY,
            removal_policy=core.RemovalPolicy.DESTROY
        )
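For reference, a minimal sketch of what "function.lambda_handler" inside "lambda/function.zip" might contain; the body is hypothetical, only the module and handler names come from the snippet:

    # function.py -- hypothetical handler for handler="function.lambda_handler"
    import json

    def lambda_handler(event, context):
        # log the incoming event and return a simple acknowledgement
        print(json.dumps(event))
        return {"status": "ok"}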
Example #3
    def __init__(self, app: App, id: str) -> None:
        super().__init__(app, id)

        lambda_code_bucket = s3.Bucket.from_bucket_attributes(
            self, 'LambdaCodeBucket', bucket_name='my-lambda-code-bucket')

        lambdaFn = lambda_.Function(self,
                                    'Singleton',
                                    handler='index.main',
                                    code=lambda_.S3Code(
                                        bucket=lambda_code_bucket,
                                        # S3-hosted Lambda code must be a .zip deployment package
                                        key='my-lambda.zip'),
                                    runtime=lambda_.Runtime.PYTHON_3_7,
                                    timeout=Duration.seconds(300))
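A hedged usage sketch showing how a stack like this is typically instantiated and synthesized under CDK v1; the stack class name is illustrative:

    # app.py -- hypothetical entry point (stack class name is illustrative)
    from aws_cdk.core import App

    app = App()
    SingletonLambdaStack(app, 'singleton-lambda')  # the stack defined above
    app.synth()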
Example #4
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # lambda function
        source_bucket = _s3.Bucket.from_bucket_name(self, "sourceBucket",
                                                    "cdk-tutorials-resources")

        custom_lambda_s3 = _lambda.Function(
            self,
            "customLambdaS3",
            function_name="custom_lambda_s3",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="custom_lambda.lambda_handler",
            code=_lambda.S3Code(bucket=source_bucket, key="custom_lambda.zip"),
            timeout=core.Duration.seconds(3),
            reserved_concurrent_executions=1,
            environment={
                "Log_Group": "INFO",
            },
        )

        _logs.LogGroup(
            self,
            "customLogGroupS3",
            log_group_name=f"/aws/lambda/{custom_lambda_s3.function_name}",
            removal_policy=core.RemovalPolicy.DESTROY,
            retention=_logs.RetentionDays.ONE_WEEK,
        )

        # api gateway for lambda
        api = _apig.LambdaRestApi(self,
                                  "apiEndpoint",
                                  handler=custom_lambda_s3)

        core.CfnOutput(self,
                       "apiUrl",
                       value=f"{api.url}",
                       description="api url")
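Because LambdaRestApi proxies every request to the function, the handler must return an API Gateway proxy response. A minimal hedged sketch; the body is hypothetical, the handler name comes from the snippet:

    # custom_lambda.py -- hypothetical body for handler="custom_lambda.lambda_handler"
    import json

    def lambda_handler(event, context):
        # API Gateway proxy integration expects statusCode and body keys
        return {
            "statusCode": 200,
            "body": json.dumps({"path": event.get("path", "/")}),
        }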
Example #5
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # dynamodb table
        mytable = _dynamodb.Table(
            self,
            "mytable",
            table_name="my-table",
            partition_key=_dynamodb.Attribute(
                # the construct id string doubles as the partition-key name
                name=id, type=_dynamodb.AttributeType.STRING),
            removal_policy=core.RemovalPolicy.DESTROY,
            server_side_encryption=True,
        )

        # lambda function
        source_bucket = _s3.Bucket.from_bucket_name(self, "sourceBucket",
                                                    "cdk-tutorials-resources")

        custom_lambda_s3 = _lambda.Function(
            self,
            "customLambdaS3",
            function_name="custom_lambda_s3",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="custom_lambda.lambda_handler",
            code=_lambda.S3Code(bucket=source_bucket, key="custom_lambda.zip"),
            timeout=core.Duration.seconds(3),
            reserved_concurrent_executions=1,
            environment={
                "Log_Group": "INFO",
                "TABLE_NAME": f"{mytable.table_name}",
                "BUCKET_NAME": f"{source_bucket.bucket_name}",
            },
        )

        _logs.LogGroup(
            self,
            "customLogGroupS3",
            log_group_name=f"/aws/lambda/{custom_lambda_s3.function_name}",
            removal_policy=core.RemovalPolicy.DESTROY,
            retention=_logs.RetentionDays.ONE_WEEK,
        )

        # lambda role
        s3_policy = _iam.ManagedPolicy(
            self,
            "listBucketsPolicy",
            description="list s3 buckets",
            managed_policy_name="listBuckets",
            statements=[
                _iam.PolicyStatement(effect=_iam.Effect.ALLOW,
                                     actions=["s3:List*"],
                                     resources=["*"])
            ],
        )
        db_policy = _iam.ManagedPolicy(
            self,
            "dbPolicy",
            description="get and put items",
            managed_policy_name="dbPutGet",
            statements=[
                _iam.PolicyStatement(
                    effect=_iam.Effect.ALLOW,
                    actions=["dynamodb:GetItem", "dynamodb:PutItem"],
                    resources=[f"{mytable.table_arn}"],
                )
            ],
        )
        custom_lambda_s3.role.add_managed_policy(s3_policy)
        custom_lambda_s3.role.add_managed_policy(db_policy)
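A hedged sketch of a handler that would exercise the environment variables and IAM grants above; the boto3 calls are standard, the handler body is hypothetical, and "item_id" stands in for the table's partition-key name (which the stack derives from the construct id):

    # custom_lambda.py -- hypothetical handler exercising the grants above
    import os
    import boto3

    def lambda_handler(event, context):
        # s3:List* permits enumerating buckets
        bucket_names = [b["Name"] for b in boto3.client("s3").list_buckets()["Buckets"]]

        # dynamodb:PutItem/GetItem are scoped to the stack's table
        table = boto3.resource("dynamodb").Table(os.environ["TABLE_NAME"])
        table.put_item(Item={"item_id": "demo",
                             "source_bucket": os.environ["BUCKET_NAME"]})
        return {"bucket_count": len(bucket_names)}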
Example #6
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        if kwargs['env'].account == '828661178764':
            gbc_environment = 'devl'
        else:
            raise ValueError('Account not mapped!')

        # Define location of lambda deployment packages:
        bucket_source = s3.Bucket.from_bucket_name(self,
                                                   'bucket_source_id',
                                                   bucket_name='gbc-lambda')
        generic_loader = _lambda.S3Code(
            bucket_source, key='generic-loader/generic-loader-test2.zip')

        # Define attributes for the networking part:
        _vpc = ec2.Vpc.from_vpc_attributes(
            self,
            'myVPC',
            vpc_id='vpc-042f6b22897562107',
            availability_zones=['ca-central-1a', 'ca-central-1b'])

        _subnets = [
            ec2.Subnet.from_subnet_id(self,
                                      'subnet1',
                                      subnet_id='subnet-0579afb06d9cec8ed'),
            ec2.Subnet.from_subnet_id(self,
                                      'subnet2',
                                      subnet_id='subnet-07a0e458dc7ea0228')
        ]

        _security_group = [
            ec2.SecurityGroup.from_security_group_id(
                self, 'mySG', security_group_id='sg-0264ea677ccfef4ff')
        ]

        my_lambda = _lambda.Function(
            self,
            'redshift-generic-loader',
            runtime=_lambda.Runtime.PYTHON_3_6,
            code=generic_loader,
            timeout=core.Duration.seconds(30),
            handler='lambda_function.lambda_handler',
            role=iam.Role.from_role_arn(
                self,
                'myRole',
                role_arn='arn:aws:iam::828661178764:role/lambda-data-analytics',
                mutable=False),
            vpc=_vpc,
            vpc_subnets=ec2.SubnetSelection(subnets=_subnets),
            security_groups=_security_group)

        # Create main bucket for pipelines and register notifications:
        main_bucket = s3.Bucket(
            self,
            'new_bucket_id',
            bucket_name=f'gbc-analytics-prototype-1-cdk-{gbc_environment}',
            versioned=True,
            encryption=s3.BucketEncryption.S3_MANAGED)

        target_lambda = s3_notif.LambdaDestination(my_lambda)
        key_filter_1 = s3.NotificationKeyFilter(prefix='cdk-test/')
        main_bucket.add_event_notification(s3.EventType.OBJECT_CREATED,
                                           target_lambda, key_filter_1)
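Objects created under the cdk-test/ prefix now invoke the function with the standard S3 notification payload. A minimal hedged handler sketch; the body is hypothetical, the handler name comes from the snippet:

    # lambda_function.py -- hypothetical body for handler='lambda_function.lambda_handler'
    def lambda_handler(event, context):
        # the S3 notification payload carries one record per created object
        for record in event["Records"]:
            bucket = record["s3"]["bucket"]["name"]
            key = record["s3"]["object"]["key"]
            print(f"new object: s3://{bucket}/{key}")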
Example #7
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        ## create lambda function from inline source code
        try:
            with open("./lambda_src/custom_lambda.py", "r") as f:
                custom_lambda_code = f.read()
        except OSError as e:
            raise Exception(f"failed to read lambda function code: {e}")

        custom_lambda_fn = _lambda.Function(
            self,
            "customLambda",
            function_name="custom_lambda",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="index.lambda_handler",
            code=_lambda.InlineCode(custom_lambda_code),
            timeout=core.Duration.seconds(3),
            reserved_concurrent_executions=1,
            environment={"LOG_LEVEL": "INFO"},
        )

        # create custom log group
        custom_loggroup = _logs.LogGroup(
            self,
            "customLogGroup",
            log_group_name=f"/aws/lambda/{custom_lambda_fn.function_name}",
            removal_policy=core.RemovalPolicy.DESTROY,
        )

        ## create lambda function from s3
        source_bucket = _s3.Bucket.from_bucket_name(self, "sourceBucket", "cdk-tutorials-resources")

        custom_lambda_s3 = _lambda.Function(
            self,
            "customLambdaS3",
            function_name="custom_lambda_s3",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="custom_lambda.lambda_handler",
            code=_lambda.S3Code(bucket=source_bucket, key="custom_lambda.zip"),
            timeout=core.Duration.seconds(3),
            reserved_concurrent_executions=1,
            environment={"Log_Group": "INFO"},
        )

        _logs.LogGroup(
            self,
            "customLogGroupS3",
            log_group_name=f"/aws/lambda/{custom_lambda_s3.function_name}",
            removal_policy=core.RemovalPolicy.DESTROY,
            retention=_logs.RetentionDays.ONE_WEEK,
        )

        ## cloudwatch rules
        #  - Mon-Fri at 18:00 UTC
        six_pm_cron = _events.Rule(
            self,
            "sixPmRule",
            schedule=_events.Schedule.cron(
                minute="0", hour="18", month="*", week_day="MON-FRI", year="*"
            ),
        )
        # - every 3 minutes
        run_every_3_mins = _events.Rule(
            self, "every3Mins", schedule=_events.Schedule.rate(core.Duration.minutes(3))
        )

        # add lambda to cloudwatch event rules
        six_pm_cron.add_target(_targets.LambdaFunction(custom_lambda_fn))
        run_every_3_mins.add_target(_targets.LambdaFunction(custom_lambda_s3))
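Both rules deliver the standard CloudWatch Events scheduled payload to their targets. A hedged sketch of the fields either handler receives:

    # hypothetical snippet inside either handler: inspecting the scheduled event
    def lambda_handler(event, context):
        print(event["detail-type"])  # "Scheduled Event"
        print(event["resources"])    # list containing the triggering rule's ARN
        print(event["time"])         # ISO-8601 trigger time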