def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Reference an existing S3 bucket (created outside this stack)
        # that holds the Lambda deployment package.
        konstone_bkt = _s3.Bucket.from_bucket_attributes(
            self,
            "konstoneAssetsBucket",
            bucket_name="konstone-assets-bkt")

        # Lambda function whose code is the zip stored in the bucket above.
        konstone_fn = _lambda.Function(
            self,
            "konstoneFunction",
            function_name="konstone_fn",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="konstone_processor.lambda_handler",
            code=_lambda.S3Code(bucket=konstone_bkt,
                                key="lambda_src/konstone_processor.zip"),
            timeout=core.Duration.seconds(2),
            reserved_concurrent_executions=1)

        # Custom log group (/aws/lambda/<function-name>): one-week
        # retention, deleted together with the function/stack.
        konstone_lg = _logs.LogGroup(
            self,
            "konstoneLoggroup",
            log_group_name=f"/aws/lambda/{konstone_fn.function_name}",
            removal_policy=core.RemovalPolicy.DESTROY,
            retention=_logs.RetentionDays.ONE_WEEK)
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Read the Lambda handler source so it can be inlined into the
        # template. Re-raise on failure: without the source the stack
        # cannot be synthesized, and `konstone_fn_code` would otherwise be
        # unbound below, masking the real I/O error with a NameError.
        try:
            with open("serverless_stacks/lambda_src/konstone_processor.py",
                      mode="r") as f:
                konstone_fn_code = f.read()
        except OSError:
            print("Unable to read Lambda Function Code")
            raise

        # Inline-code Lambda function.
        konstone_fn = _lambda.Function(
            self,
            "konstoneFunction",
            function_name="konstone_function",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="index.lambda_handler",
            code=_lambda.InlineCode(konstone_fn_code),
            timeout=core.Duration.seconds(3),
            reserved_concurrent_executions=1,
            environment={"LOG_LEVEL": "INFO"})

        # Custom log group (/aws/lambda/<function-name>) so it is removed
        # with the stack instead of lingering forever.
        konstone_lg = _logs.LogGroup(
            self,
            "konstoneLoggroup",
            log_group_name=f"/aws/lambda/{konstone_fn.function_name}",
            removal_policy=core.RemovalPolicy.DESTROY)
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # Spread subnets over up to three availability zones.
        self.max_azs = 3

        # VPC with one public, one app and one data subnet tier per AZ.
        # CIDR, DNS flags and HA mode come from kwargs['env'] — assumes the
        # caller supplies an 'env' dict with 'deploy_env', 'cidr',
        # 'aws_dns' and 'ha' keys (TODO: confirm against callers).
        vpc = ec2.Vpc(
            self,
            f"{id}-{kwargs['env']['deploy_env']}-vpc",
            cidr=kwargs['env']['cidr'],
            default_instance_tenancy=ec2.DefaultInstanceTenancy.DEFAULT,
            enable_dns_hostnames=kwargs['env']['aws_dns'],
            enable_dns_support=kwargs['env']['aws_dns'],
            max_azs=self.max_azs,
            # One NAT gateway per AZ in HA mode, otherwise a single one.
            nat_gateways=self.max_azs if kwargs['env']['ha'] else 1,
            subnet_configuration=[
                ec2.SubnetConfiguration(name='public',
                                        subnet_type=ec2.SubnetType.PUBLIC,
                                        cidr_mask=20),
                ec2.SubnetConfiguration(name='app',
                                        subnet_type=ec2.SubnetType.PRIVATE,
                                        cidr_mask=20),
                ec2.SubnetConfiguration(name='data',
                                        subnet_type=ec2.SubnetType.PRIVATE,
                                        cidr_mask=20)
            ])

        # Destination log group for VPC flow logs, retained one month.
        flowlog_log_group = logs.LogGroup(
            self,
            f"{id}-{kwargs['env']['deploy_env']}-flowlog-log-group",
            log_group_name=f"/flowlogs/{kwargs['env']['deploy_env']}",
            retention=logs.RetentionDays.ONE_MONTH)

        # Policy allowing the flow-log service to write into that group.
        iam_policy = iam.PolicyDocument(
            assign_sids=True,
            statements=[
                iam.PolicyStatement(
                    actions=[
                        "logs:CreateLogStream", "logs:PutLogEvents",
                        "logs:DescribeLogGroups", "logs:DescribeLogStreams"
                    ],
                    effect=iam.Effect.ALLOW,
                    resources=[flowlog_log_group.log_group_arn])
            ])

        # Role assumed by the VPC Flow Logs service principal.
        iam_role = iam.Role(
            self,
            f"{id}-{kwargs['env']['deploy_env']}-flowlog-role",
            assumed_by=iam.ServicePrincipal('vpc-flow-logs.amazonaws.com'),
            inline_policies={
                f"{id}-{kwargs['env']['deploy_env']}-flowlogs": iam_policy
            })

        # Capture ALL traffic for the whole VPC into CloudWatch Logs.
        flowlog = ec2.CfnFlowLog(
            self,
            f"{id}-{kwargs['env']['deploy_env']}-flowlog",
            deliver_logs_permission_arn=iam_role.role_arn,
            log_destination_type='cloud-watch-logs',
            log_group_name=f"/flowlogs/{kwargs['env']['deploy_env']}",
            traffic_type='ALL',
            resource_type='VPC',
            resource_id=vpc.vpc_id)
# --- Example 4 ---
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # SNS topic the Lambda function will publish to.
        topic = sns.Topic(self,
                          'sns-to-lambda-topic',
                          display_name='My SNS topic')

        # Publisher Lambda function (source under ./lambda).
        lambdaFn = _lambda.Function(self,
                                    "SNSPublisher",
                                    runtime=_lambda.Runtime.PYTHON_3_9,
                                    code=_lambda.Code.from_asset("lambda"),
                                    handler="handler.main",
                                    timeout=cdk.Duration.seconds(10))

        # Explicit log group: one-day retention, removed with the stack.
        logs.LogGroup(self,
                      'logs',
                      log_group_name=f"/aws/lambda/{lambdaFn.function_name}",
                      removal_policy=cdk.RemovalPolicy.DESTROY,
                      retention=logs.RetentionDays.ONE_DAY)

        # Allow the function to publish to the topic.
        topic.grant_publish(lambdaFn)

        # Surface the created resource identifiers as stack outputs.
        cdk.CfnOutput(self,
                      'snsTopicArn',
                      value=topic.topic_arn,
                      description='The arn of the SNS topic')
        cdk.CfnOutput(self,
                      'functionName',
                      value=lambdaFn.function_name,
                      description='The name of the handler function')
# --- Example 5 ---
    def __init__(self, app: App, id: str) -> None:
        super().__init__(app, id)

        # Handler Lambda (source under ./lambda).
        handler_fn = _lambda.Function(self,
                                      "SNSEventHandler",
                                      runtime=_lambda.Runtime.PYTHON_3_9,
                                      code=_lambda.Code.from_asset("lambda"),
                                      handler="handler.main",
                                      timeout=Duration.seconds(10))

        # Short-lived log group, destroyed together with the stack.
        logs.LogGroup(self,
                      'logs',
                      log_group_name=f"/aws/lambda/{handler_fn.function_name}",
                      removal_policy=RemovalPolicy.DESTROY,
                      retention=logs.RetentionDays.ONE_DAY)

        # Topic that triggers the handler via a Lambda subscription.
        notification_topic = sns.Topic(self,
                                       'sns-to-lambda-topic-test',
                                       display_name='My SNS topic')
        notification_topic.add_subscription(
            subs.LambdaSubscription(handler_fn))

        # Stack outputs describing the created resources.
        CfnOutput(self,
                  'snsTopicArn',
                  value=notification_topic.topic_arn,
                  description='The arn of the SNS topic')
        CfnOutput(self,
                  'functionName',
                  value=handler_fn.function_name,
                  description='The name of the handler function')
# --- Example 6 ---
    def __init__(self, app: App, id: str) -> None:
        super().__init__(app, id)

        # Inline the handler source straight into the template.
        with open("lambda-handler.py", encoding="utf8") as source_file:
            handler_source = source_file.read()

        singleton_fn = lambda_.Function(
            self,
            "Singleton",
            code=lambda_.InlineCode(handler_source),
            handler="index.main",
            timeout=Duration.seconds(10),
            runtime=lambda_.Runtime.PYTHON_3_9)

        # Short-retention log group that disappears with the stack.
        logs.LogGroup(
            self,
            'logs',
            log_group_name=f"/aws/lambda/{singleton_fn.function_name}",
            removal_policy=RemovalPolicy.DESTROY,
            retention=logs.RetentionDays.ONE_DAY)

        # Route matching EventBridge events to the function.
        rule = events.Rule(self, "Rule")
        rule.add_event_pattern(source=["cdk.myApp"],
                               detail_type=["transaction"])
        rule.add_target(targets.LambdaFunction(singleton_fn))
# --- Example 7 ---
 def _create_api_log_group(self, gw):
     """Return a CloudWatch log group for the API gateway described by *gw*."""
     name = gw["gw_log_group_name"]
     # Log group under /aws/apigateway/, removed together with the stack.
     return _logs.LogGroup(self,
                           name,
                           log_group_name="/aws/apigateway/" + name,
                           retention=LOG_RETENTION_PERIOD,
                           removal_policy=core.RemovalPolicy.DESTROY)
    def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Existing S3 bucket that holds the deployment package.
        source_bucket = aws_s3.Bucket.from_bucket_attributes(
            self, "tests3", bucket_name="manuja-test1")

        # Lambda function loaded from the zip in the bucket above.
        test_func01 = aws_lambda.Function(
            self,
            "testlambda",
            function_name="lambdaFromS3",
            runtime=aws_lambda.Runtime.PYTHON_3_6,
            handler="function.lambda_handler",
            code=aws_lambda.S3Code(bucket=source_bucket,
                                   key="lambda/function.zip"),
            timeout=core.Duration.seconds(5),
            reserved_concurrent_executions=1)

        # One-day log group, destroyed together with the stack.
        log_group01 = aws_logs.LogGroup(
            self,
            "cloudwatchlog01",
            log_group_name=f"/aws/lambda/{test_func01.function_name}",
            retention=aws_logs.RetentionDays.ONE_DAY,
            removal_policy=core.RemovalPolicy.DESTROY)
# --- Example 9 ---
def get_fargate_container(
        scope: core.Construct,
        construct_id: str,
        task: aws_ecs.TaskDefinition,
        mem_limit: str,
        environment: Optional[Mapping] = None) -> aws_ecs.ContainerDefinition:
    """Add a container to *task* that logs to its own CloudWatch group."""
    log_group = aws_logs.LogGroup(
        scope,
        f"{construct_id}-container-log-group",
        log_group_name=f"/aws/ecs/{construct_id}",
        retention=DEFAULT_LOG_RETENTION,
        removal_policy=core.RemovalPolicy.DESTROY,
    )

    # Docker build context: lib/stacks/<construct_id>/docker, with dashes
    # mapped to underscores.
    docker_dir = f"lib/stacks/{construct_id}/docker".replace("-", "_")

    log_driver = aws_ecs.LogDrivers.aws_logs(  # pylint: disable=no-value-for-parameter
        log_group=log_group,
        stream_prefix=construct_id,
    )

    return task.add_container(
        f"{construct_id}-task-container",
        image=aws_ecs.ContainerImage.from_asset(  # pylint: disable=no-value-for-parameter
            directory=docker_dir),
        memory_limit_mib=int(mem_limit),
        working_directory="/tmp",
        logging=log_driver,
        environment=environment,
    )
# --- Example 10 ---
    def __init__(self, app: App, id: str) -> None:
        super().__init__(app, id)

        # Log group that receives every matched event; one-day retention
        # and deleted with the stack.
        event_log_group = logs.LogGroup(
            self,
            "logs",
            retention=logs.RetentionDays.ONE_DAY,
            removal_policy=RemovalPolicy.DESTROY)

        # Dedicated event bus plus a rule forwarding matching events into
        # the log group.
        custom_bus = events.EventBus(self, "bus",
                                     event_bus_name="test-bus-cdk")
        rule = events.Rule(self, "rule", event_bus=custom_bus)
        rule.add_event_pattern(source=["my-cdk-application"],
                               detail_type=["message"])
        rule.add_target(targets.CloudWatchLogGroup(event_log_group))

        CfnOutput(self,
                  "LogGroupName",
                  description="Name of CloudWatch Log Group",
                  value=event_log_group.log_group_name)
# --- Example 11 ---
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Read the function source to inline it into the template.
        # Re-raise on failure: otherwise `function_body` is unbound below
        # and the real I/O error is masked by a NameError.
        try:
            with open("serverless_stack/functions/function.py",
                      mode="r") as file:
                function_body = file.read()
        except OSError:
            print('Unable to read function code')
            raise

        # Inline-code Lambda function.
        function_01 = aws_lambda.Function(
            self,
            "lambdafunction01",
            function_name="LambdaTestCDK",
            runtime=aws_lambda.Runtime.PYTHON_3_6,
            handler="index.lambda_handler",
            code=aws_lambda.InlineCode(function_body),
            timeout=core.Duration.seconds(5),
            reserved_concurrent_executions=1,
            environment={'LOG_LEVEL': 'INFO'})

        # Custom log group, deleted together with the stack.
        log_group01 = aws_logs.LogGroup(
            self,
            "cloudwatchlog01",
            log_group_name=f"/aws/lambda/{function_01.function_name}",
            removal_policy=core.RemovalPolicy.DESTROY)
# --- Example 12 ---
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Lambda function with X-Ray tracing enabled; code under src/hello/.

        hello_function = lambda_.Function(
            self,
            "hello-function",
            code=lambda_.Code.from_asset("src/hello/"),
            handler="main.handler",
            runtime=lambda_.Runtime.PYTHON_3_8,
            tracing=lambda_.Tracing.ACTIVE)

        # Function log group with one-week retention.
        logs.LogGroup(
            self,
            "hello-logs",
            log_group_name=f"/aws/lambda/{hello_function.function_name}",
            retention=logs.RetentionDays.ONE_WEEK)

        # API Gateway: access logs (one-week retention) and a REST API with
        # structured JSON access logging, throttling, and tracing enabled.

        api_logs = logs.LogGroup(self,
                                 "hello-api-logs",
                                 retention=logs.RetentionDays.ONE_WEEK)

        api = apigw.RestApi(
            self,
            "hello-api",
            deploy_options=apigw.StageOptions(
                access_log_destination=apigw.LogGroupLogDestination(api_logs),
                access_log_format=apigw.AccessLogFormat.
                json_with_standard_fields(caller=True,
                                          http_method=True,
                                          ip=True,
                                          protocol=True,
                                          request_time=True,
                                          resource_path=True,
                                          response_length=True,
                                          status=True,
                                          user=True),
                throttling_burst_limit=1000,
                throttling_rate_limit=10,
                tracing_enabled=True))

        # Proxy GET on the API root straight through to the function.
        hello_integration = apigw.LambdaIntegration(hello_function, proxy=True)
        api.root.add_method("GET", hello_integration)
# --- Example 13 ---
    def __init__(self, scope: core.Construct, id: str, *, app_env: str, **kwargs):
        # BUG FIX: kwargs must be unpacked with ** — previously it was
        # passed as a single positional dict, which the parent constructor
        # would misinterpret.
        super().__init__(scope, id, **kwargs)

        # ECS log group named per environment, retained for one month.
        params = {
            'log_group_name': f'open-{app_env}-ecs-log-group',
            'retention': logs.RetentionDays.ONE_MONTH,
        }
        self._logs = logs.LogGroup(self, 'open-log-group', **params)
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # S3 bucket for the web store assets; versioned so overwritten
        # objects remain recoverable.
        kk_store = _s3.Bucket(self, "kkStore", versioned=True)

        # DynamoDB table keyed by "_id"; removed together with the stack.
        kk_store_assets_table = _dynamodb.Table(
            self,
            "kkStoreAssetsDDBTable",
            table_name="kk_store_assets_tables",
            partition_key=_dynamodb.Attribute(
                name="_id", type=_dynamodb.AttributeType.STRING),
            removal_policy=core.RemovalPolicy.DESTROY)

        # Read the Lambda source for inlining. Re-raise on failure:
        # otherwise `kk_store_processor_fn_code` would be unbound below and
        # the real I/O error masked by a NameError.
        try:
            with open("advanced_use_cases/lambda_src/s3_event_processor.py",
                      mode="r") as f:
                kk_store_processor_fn_code = f.read()
        except OSError:
            print("Unable to read Lambda Function Code")
            raise

        # Deploy the Lambda function that processes store events.
        kk_store_processor_fn = _lambda.Function(
            self,
            "kkStoreProcessorFn",
            function_name="kk_store_processor_fn",
            description="Process store events and update DDB",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="index.lambda_handler",
            code=_lambda.InlineCode(kk_store_processor_fn_code),
            timeout=core.Duration.seconds(3),
            reserved_concurrent_executions=1,
            environment={
                "LOG_LEVEL": "INFO",
                "DDB_TABLE_NAME": f"{kk_store_assets_table.table_name}"
            })

        # Allow the function to read and write the assets table.
        kk_store_assets_table.grant_read_write_data(kk_store_processor_fn)

        # Custom log group: one-day retention, destroyed with the stack.
        kk_store_lg = _logs.LogGroup(
            self,
            "kkStoreLogGroup",
            log_group_name=f"/aws/lambda/{kk_store_processor_fn.function_name}",
            removal_policy=core.RemovalPolicy.DESTROY,
            retention=_logs.RetentionDays.ONE_DAY)

        # Invoke the processor whenever an object is created in the bucket.
        kk_store_backend = _s3_notifications.LambdaDestination(
            kk_store_processor_fn)
        kk_store.add_event_notification(_s3.EventType.OBJECT_CREATED,
                                        kk_store_backend)
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        # NOTE: kwargs are deliberately NOT forwarded to super(); they are
        # passed through to the custom resource as properties below.
        super().__init__(scope, id)

        # Read the Lambda source so it can be inlined; abort synthesis if
        # the file cannot be read.
        try:
            with open(
                    "waf_stacks/custom_resources/waf_rate_rule_creator/lambda_src/index.py",
                    encoding="utf-8",
                    mode="r") as f:
                waf_rate_rule_creator_fn_code = f.read()
        except OSError:
            print("Unable to read Lambda Function Code")
            raise

        # IAM permissions the Lambda needs to read and update WAF web ACLs.
        role_stmt1 = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            resources=["*"],
            actions=["wafv2:GetWebACL", "wafv2:UpdateWebACL"])
        role_stmt1.sid = "AllowLambdaToCreateWafRules"

        # Singleton Lambda (deduplicated per stack by uuid) that backs the
        # custom resource.
        waf_rate_rule_creator_fn = _lambda.SingletonFunction(
            self,
            "waFRateRuleCreatorSingleton",
            uuid="mystique30-4ee1-11e8-9c2d-fa7ae01bbebc",
            code=_lambda.InlineCode(waf_rate_rule_creator_fn_code),
            handler="index.lambda_handler",
            timeout=core.Duration.seconds(10),
            runtime=_lambda.Runtime.PYTHON_3_7,
            reserved_concurrent_executions=1,
            environment={
                "LOG_LEVEL": "INFO",
                "APP_ENV": "Production"
            },
            description="Creates a rate based WAF rule")

        waf_rate_rule_creator_fn.add_to_role_policy(role_stmt1)

        # Custom log group: one-week retention, deleted with the stack.
        waf_rate_rule_creator_fn_lg = _logs.LogGroup(
            self,
            "wafRateRuleCreatorLogGroup",
            log_group_name=
            f"/aws/lambda/{waf_rate_rule_creator_fn.function_name}",
            retention=_logs.RetentionDays.ONE_WEEK,
            removal_policy=core.RemovalPolicy.DESTROY)

        # Custom resource backed by the Lambda; kwargs become its
        # CloudFormation properties.
        waf_rate_rule_creator = cfn.CustomResource(
            self,
            "wafRateRuleCreatorCustomResource",
            provider=cfn.CustomResourceProvider.lambda_(
                waf_rate_rule_creator_fn),
            properties=kwargs,
        )

        # Expose the rule-add status reported by the custom resource.
        self.response = waf_rate_rule_creator.get_att(
            "rule_add_status").to_string()
# --- Example 16 ---
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Access-log destination for the API stage.
        api_log_group = cwlogs.LogGroup(self, "HelloWorldAPILogs")

        # Regional REST API. deploy_options is a plain dict that CDK
        # coerces into StageOptions.
        self.target_api = apigw.RestApi(
            self,
            'HelloWorldAPI',
            rest_api_name='HelloWorld',
            endpoint_types=[apigw.EndpointType.REGIONAL],
            deploy_options={
                "access_log_destination":
                apigw.LogGroupLogDestination(api_log_group),
                "access_log_format":
                apigw.AccessLogFormat.clf(),
                "method_options": {
                    "/*/*":
                    {  # This special path applies to all resource paths and all HTTP methods
                        "throttling_rate_limit": 100,
                        "throttling_burst_limit": 200
                    }
                }
            })

        # Backing Lambda for the /helloworld resource.
        hello_world = py_lambda.PythonFunction(
            self,
            "HelloWorld",
            entry='thewafapigateway/lambda_fns',
            index='helloworld.py',
            handler='lambda_handler',
            description='Helloworld',
            timeout=core.Duration.seconds(60))

        # GET /helloworld -> Lambda (non-proxy) with a permissive CORS
        # header on the 200 response.
        entity = self.target_api.root.add_resource('helloworld')
        this_lambda_integration = apigw.LambdaIntegration(
            hello_world,
            proxy=False,
            integration_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin':
                    "'*'",
                }
            }])
        method = entity.add_method(
            'GET',
            this_lambda_integration,
            method_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': True,
                }
            }])

        # Stage ARN for downstream use (e.g. WAF web ACL association).
        # NOTE(review): region is hard-coded to ap-southeast-2 here.
        self.resource_arn = f"arn:aws:apigateway:ap-southeast-2::/restapis/{self.target_api.rest_api_id}/stages/{self.target_api.deployment_stage.stage_name}"
# --- Example 17 ---
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Access-log destination for the API stage.
        api_log_group = cw_logs.LogGroup(self, "HelloWorldAPILogs")

        # Regional REST API with CLF-format access logging and per-method
        # throttling applied to every path/method via "/*/*".
        self.target_api = api_gw.RestApi(
            self,
            'HelloWorldAPI',
            rest_api_name='HelloWorld',
            endpoint_types=[api_gw.EndpointType.REGIONAL],
            deploy_options=api_gw.StageOptions(
                access_log_destination=api_gw.LogGroupLogDestination(
                    api_log_group),
                access_log_format=api_gw.AccessLogFormat.clf(),
                method_options={
                    # This special path applies to all resource paths and all HTTP methods
                    "/*/*":
                    api_gw.MethodDeploymentOptions(throttling_rate_limit=100,
                                                   throttling_burst_limit=200)
                }))

        # Backing Lambda for the /helloworld resource.
        hello_world = _lambda.Function(
            self,
            "HelloWorld",
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler='helloworld.lambda_handler',
            code=_lambda.Code.from_asset("lambda_fns"),
            timeout=core.Duration.seconds(60))

        # GET /helloworld -> Lambda (non-proxy) with a permissive CORS
        # header on the 200 response.
        entity = self.target_api.root.add_resource('helloworld')
        this_lambda_integration = api_gw.LambdaIntegration(
            hello_world,
            proxy=False,
            integration_responses=[
                api_gw.IntegrationResponse(
                    status_code='200',
                    response_parameters={
                        'method.response.header.Access-Control-Allow-Origin':
                        "'*'"
                    })
            ])
        entity.add_method(
            'GET',
            this_lambda_integration,
            method_responses=[
                api_gw.MethodResponse(
                    status_code='200',
                    response_parameters={
                        'method.response.header.Access-Control-Allow-Origin':
                        True
                    })
            ])

        # Stage ARN for downstream use; region taken from the stack.
        self.resource_arn = f"arn:aws:apigateway:{core.Stack.of(self).region}::/restapis/{self.target_api.rest_api_id}/stages/{self.target_api.deployment_stage.stage_name}"
    def __init__(self, scope: core.Construct, id: str,
                 common_stack: CommonStack, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Record region support before creating any resources.
        self._supported_in_region = self.is_service_supported_in_region()

        # Log group the Android integration tests write into.
        logs.LogGroup(
            self,
            "android-integ-test-log-group",
            log_group_name="com/amazonaws/tests")

        # Attach the shared role policies to this stack.
        common_stack.add_to_common_role_policies(self)
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Queue with a 5-minute visibility timeout.
        message_queue = _sqs.Queue(self,
                                   "MyQueue",
                                   visibility_timeout=Duration.seconds(300))

        # Consumer Lambda (source in ./lambda) fed by the queue.
        consumer_fn = _lambda.Function(
            self,
            "MyLambdaFunction",
            runtime=_lambda.Runtime.PYTHON_3_9,
            handler="get_messages.handler",
            code=_lambda.Code.from_asset("lambda"))

        # Explicit log group: one-day retention, removed with the stack.
        logs.LogGroup(
            self,
            'logs',
            log_group_name=f"/aws/lambda/{consumer_fn.function_name}",
            removal_policy=RemovalPolicy.DESTROY,
            retention=logs.RetentionDays.ONE_DAY)

        # Wire queue to function: consume permission plus event source.
        message_queue.grant_consume_messages(consumer_fn)
        consumer_fn.add_event_source(_event.SqsEventSource(message_queue))

        # Exported outputs describing the created resources.
        CfnOutput(self,
                  "FunctionName",
                  value=consumer_fn.function_name,
                  export_name='FunctionName',
                  description='Function name')
        CfnOutput(self,
                  "QueueName",
                  value=message_queue.queue_name,
                  export_name='QueueName',
                  description='SQS queue name')
        CfnOutput(self,
                  "QueueArn",
                  value=message_queue.queue_arn,
                  export_name='QueueArn',
                  description='SQS queue ARN')
        CfnOutput(self,
                  "QueueUrl",
                  value=message_queue.queue_url,
                  export_name='QueueUrl',
                  description='SQS queue URL')
# --- Example 20 ---
    def __init__(self, scope: core.Construct, id: builtins.str,
                 action_name: str, resources: FsiSharedResources,
                 function: lambda_.Function) -> None:
        super().__init__(scope, id)
        # Shared resources (landing zone etc.); presumably surfaced via a
        # `resources` property elsewhere on this class — see usage below.
        self.__resources = resources

        # The construct id doubles as the state machine name.
        state_machine_name = id

        # Step 1: invoke the worker Lambda synchronously; its response is
        # stored under $.Result without clobbering the original payload.
        invoke_function = sft.LambdaInvoke(
            self,
            'InvokeFunction',
            lambda_function=function,
            invocation_type=sft.LambdaInvocationType.REQUEST_RESPONSE,
            input_path='$.Payload',
            result_path='$.Result')

        # Step 2: branch on the run state reported by the Lambda.
        choice = sf.Choice(self,
                           'IsComplete',
                           comment='Check if theres more to process')
        # More work available -> loop back into the Lambda.
        choice.when(
            sf.Condition.string_equals('$.Result.Payload.Result.RunState',
                                       'RunStatus.MORE_AVAILABLE'),
            invoke_function)
        # Finished -> terminal Pass state.
        choice.when(
            sf.Condition.string_equals('$.Result.Payload.Result.RunState',
                                       'RunStatus.COMPLETE'),
            sf.Pass(self, 'Finalize', comment='Workflow Complete'))
        # Any other value -> fail the execution loudly.
        choice.otherwise(
            sf.Fail(self,
                    'NotImplemented',
                    cause='Unknown Choice',
                    error='NotImplementedException'))

        definition = invoke_function.next(choice)

        # Standard state machine: 2-hour timeout, X-Ray tracing, execution
        # logs sent to a stack-owned log group.
        # NOTE(review): relies on `self.resources` and `self.component_name`
        # being defined elsewhere on this class — confirm.
        zone_name = self.resources.landing_zone.zone_name
        self.state_machine = sf.StateMachine(
            self,
            'StateMachine',
            state_machine_name=state_machine_name,
            state_machine_type=sf.StateMachineType.STANDARD,
            timeout=core.Duration.hours(2),
            logs=sf.LogOptions(destination=logs.LogGroup(
                self,
                'LogGroup',
                removal_policy=core.RemovalPolicy.DESTROY,
                retention=RetentionDays.TWO_WEEKS,
                log_group_name='/homenet/fsi-{}/states/{}/{}'.format(
                    zone_name, self.component_name, action_name).lower())),
            tracing_enabled=True,
            definition=definition)
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Deployment-time parameters.
        phone_number = CfnParameter(self,
                                    "phoneNumber",
                                    type="String",
                                    description="Recipient phone number")
        ten_dlc = CfnParameter(self,
                               "tenDLC",
                               type="String",
                               description="10DLC origination number")

        # Pre-create the log group so it is cleaned up on stack destroy;
        # log groups created implicitly by Lambda never expire and are not
        # removed with the stack.
        publisher_log_group = logs.LogGroup(
            self,
            'SMSPublisherFunctionLogGroup',
            log_group_name='/aws/lambda/SMSPublisherFunction',
            removal_policy=RemovalPolicy.DESTROY,
            retention=logs.RetentionDays.FIVE_DAYS,
        )

        publisher_fn = aws_lambda.Function(
            self,
            'SMSPublisherFunction',
            code=aws_lambda.Code.from_asset('src'),
            function_name='SMSPublisherFunction',
            handler='app.handler',
            runtime=aws_lambda.Runtime.NODEJS_12_X,
            timeout=Duration.seconds(3),
            memory_size=128,
            environment={
                'phoneNumber': phone_number.value_as_string,
                'tenDLC': ten_dlc.value_as_string
            },
            # Deny publishing to any topic while allowing direct SMS sends.
            initial_policy=[
                iam.PolicyStatement(actions=['sns:Publish'],
                                    effect=iam.Effect.DENY,
                                    resources=['arn:aws:sns:*:*:*']),
                iam.PolicyStatement(actions=['sns:Publish'],
                                    effect=iam.Effect.ALLOW,
                                    resources=['*'])
            ],
        )
        # Ensure the log group exists before the function so CDK/Lambda
        # does not implicitly create a second one.
        publisher_fn.node.add_dependency(publisher_log_group)

        CfnOutput(self,
                  'SMSPublisherFunctionName',
                  description='SMSPublisherFunction function name',
                  value=publisher_fn.function_name)
    def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
        """Schedule an ETL Lambda with a DLQ, plus a second Lambda that drains the DLQ."""
        super().__init__(scope, construct_id, **kwargs)

        # Dead-letter queue shared by the function itself and the Events target.
        queue = sqs.Queue(self, "Queue", queue_name="Events_DLQ")

        fn = lambda_.Function(self, "ETL_job_func",
            runtime=lambda_.Runtime.PYTHON_3_8,
            handler="lambda_function.handler",
            # Code.asset() is deprecated; from_asset() is the supported
            # spelling and matches the usage elsewhere in this project.
            code=lambda_.Code.from_asset('lambda'),
            dead_letter_queue=queue
        )

        # Processor for events that land in the DLQ.
        fn_dlq_process = lambda_.Function(self, "DLQ_Process_func",
            runtime=lambda_.Runtime.PYTHON_3_8,
            handler="lambda_function.handler",
            code=lambda_.Code.from_asset('lambda_dlq')
        )

        # Run the ETL job daily at 11:00 UTC.
        rule = events.Rule(
            self, "Rule",
            schedule=events.Schedule.cron(
                minute='0',
                hour='11')
        )

        rule.add_target(targets.LambdaFunction(fn,
            dead_letter_queue=queue,  # Optional: add a dead letter queue
            max_event_age=cdk.Duration.hours(2),  # Optional: set the maxEventAge retry policy
            retry_attempts=2
        ))

        # Drain the DLQ an hour later, at 12:00 UTC.
        rule_dlq = events.Rule(
            self, "Rule_DLQ",
            schedule=events.Schedule.cron(
                minute='0',
                hour='12')
        )

        rule_dlq.add_target(targets.LambdaFunction(fn_dlq_process))

        # Mirror the scheduled events into a log group for auditing.
        log_group = logs.LogGroup(self, "EventsLogGroup",
            log_group_name="EventsLogGroup"
        )

        rule.add_target(targets.CloudWatchLogGroup(log_group))
Ejemplo n.º 23
0
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        """Deploy an inline-code Lambda driven by two scheduled Events rules."""
        super().__init__(scope, construct_id, **kwargs)

        # Import the handler source for inline deployment. Fail fast on I/O
        # errors: the original print-and-continue left `function_body` unbound,
        # which surfaced later as a confusing NameError in the Function call.
        try:
            with open("serverless_stack/functions/function.py",
                      mode="r") as file:
                function_body = file.read()
        except OSError as err:
            raise RuntimeError(
                "Unable to read serverless_stack/functions/function.py"
            ) from err

        function_01 = aws_lambda.Function(
            self,
            "lambdafunction01",
            function_name="LambdaTestCDK",
            runtime=aws_lambda.Runtime.PYTHON_3_6,
            handler="index.lambda_handler",
            code=aws_lambda.InlineCode(function_body),
            timeout=core.Duration.seconds(5),
            reserved_concurrent_executions=1,
            environment={'LOG_LEVEL': 'INFO'})

        # Own the function's log group so a stack destroy removes it.
        log_group01 = aws_logs.LogGroup(
            self,
            "cloudwatchlog01",
            log_group_name=f"/aws/lambda/{function_01.function_name}",
            removal_policy=core.RemovalPolicy.DESTROY)

        # Trigger at 06:00 on weekdays.
        cron_01 = aws_events.Rule(self,
                                  "cron01",
                                  schedule=aws_events.Schedule.cron(
                                      minute="0",
                                      hour="6",
                                      month="*",
                                      week_day="MON-FRI",
                                      year="*"))

        # Trigger every 5 minutes.
        cron_02 = aws_events.Rule(self,
                                  "cron02",
                                  schedule=aws_events.Schedule.rate(
                                      core.Duration.minutes(5)))

        # Both schedules invoke the same Lambda.
        cron_01.add_target(aws_events_targets.LambdaFunction(function_01))
        cron_02.add_target(aws_events_targets.LambdaFunction(function_01))
Ejemplo n.º 24
0
    def __init__(self, scope: core.App, id: str, props, **kwargs) -> None:
        """Wire CloudTrail into CloudWatch Logs and create the Amplify CI asset bucket."""
        super().__init__(scope, id, **kwargs)

        # CloudTrail streams management events into this log group; both the
        # group and the trail keep two weeks of history.
        trail_logs = aws_logs.LogGroup(
            self,
            "CloudTrailLogs",
            log_group_name="CloudTrailLogs",
            retention=aws_logs.RetentionDays.TWO_WEEKS,
        )
        aws_cloudtrail.Trail(
            self,
            "OpsTrail",
            cloud_watch_log_group=trail_logs,
            cloud_watch_logs_retention=aws_logs.RetentionDays.TWO_WEEKS,
            send_to_cloud_watch_logs=True,
            trail_name="OpsTrail",
        )

        # Bucket for Amplify CI assets; name is account-scoped to stay
        # globally unique, and the bucket is destroyed with the stack.
        self.config_source_bucket = aws_s3.Bucket(
            self,
            "AmplifyConfigBucket",
            bucket_name=f"amplify-ci-assets-{self.account}",
            removal_policy=core.RemovalPolicy.DESTROY,
        )
        
 def create_log_group_and_stream(self) -> aws_logs.LogGroup:
     """Create the Firehose delivery log group and its log stream.

     Returns the log group; the stream is created for its side effect only.
     """
     delivery_log_group = aws_logs.LogGroup(
         self,
         "integ_test_firehose_delivery_log_group",
         log_group_name=FirehoseStack.LOG_GROUP_NAME,
         removal_policy=core.RemovalPolicy.DESTROY,
         retention=aws_logs.RetentionDays.FIVE_DAYS,
     )
     # The stream lives inside the group above; both are removed on destroy.
     aws_logs.LogStream(
         self,
         "integ_test_firehose_delivery_log_stream",
         log_group=delivery_log_group,
         log_stream_name=FirehoseStack.LOG_STREAM_NAME,
         removal_policy=core.RemovalPolicy.DESTROY,
     )
     return delivery_log_group
Ejemplo n.º 26
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Deploy an inline-code Lambda fronted by an API Gateway REST endpoint."""
        super().__init__(scope, id, **kwargs)

        # Read the Lambda source for inline deployment. Fail fast on an I/O
        # error: the original print-and-continue left `konstone_fn_code`
        # unbound, so the Function construct below crashed with a NameError.
        try:
            with open("serverless_stacks/lambda_src/konstone_hello_world.py",
                      mode="r") as f:
                konstone_fn_code = f.read()
        except OSError as err:
            raise RuntimeError("Unable to read Lambda Function Code") from err

        konstone_fn = _lambda.Function(
            self,
            "konstoneFunction",
            function_name="konstone_function",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="index.lambda_handler",
            code=_lambda.InlineCode(konstone_fn_code),
            timeout=core.Duration.seconds(3),
            reserved_concurrent_executions=1,
            environment={
                "LOG_LEVEL": "INFO",
                "Environment": "Production"
            })

        # Own the function's log group (/aws/lambda/function-name) so it is
        # removed with the stack and retention is bounded.
        konstone_lg = _logs.LogGroup(
            self,
            "konstoneLoggroup",
            log_group_name=f"/aws/lambda/{konstone_fn.function_name}",
            retention=_logs.RetentionDays.ONE_WEEK,
            removal_policy=core.RemovalPolicy.DESTROY)

        # API Gateway front end proxying all requests to the Lambda.
        konstone_fn_integration = _apigw.LambdaRestApi(self,
                                                       "konstoneApiEndpoint",
                                                       handler=konstone_fn)

        output_1 = core.CfnOutput(
            self,
            "ApiUrl",
            value=f"{konstone_fn_integration.url}",
            description="Use a browser to access this url")
Ejemplo n.º 27
0
    def __init__(self, app: App, id: str) -> None:
        """Create an IoT thing plus a topic rule that invokes an inline Lambda."""
        super().__init__(app, id)

        # Inline the handler source into the Lambda deployment package.
        with open("lambda-handler.py", encoding="utf8") as source_file:
            handler_source = source_file.read()

        trigger_fn = _lambda.Function(
            self,
            "IoTTriggerLambda",
            code=_lambda.InlineCode(handler_source),
            handler="index.main",
            timeout=Duration.seconds(10),
            runtime=_lambda.Runtime.PYTHON_3_9,
        )

        # Own the function's log group so retention and removal are
        # controlled by this stack rather than left to Lambda's defaults.
        logs.LogGroup(
            self,
            'logs',
            log_group_name=f"/aws/lambda/{trigger_fn.function_name}",
            removal_policy=RemovalPolicy.DESTROY,
            retention=logs.RetentionDays.ONE_DAY,
        )

        # The IoT thing whose topic traffic feeds the rule below.
        iot.CfnThing(self, "IoTThing", thing_name="MyIotThing")

        # Topic rule: forward every message under the thing's topic tree
        # to the Lambda function.
        topic_rule = iot.CfnTopicRule(
            self,
            "IoTRule",
            topic_rule_payload=iot.CfnTopicRule.TopicRulePayloadProperty(
                sql='SELECT * FROM "$aws/things/MyIotThing/*"',
                actions=[
                    iot.CfnTopicRule.ActionProperty(
                        lambda_=iot.CfnTopicRule.LambdaActionProperty(
                            function_arn=trigger_fn.function_arn,
                        ),
                    ),
                ],
            ),
        )

        # Resource policy: allow this IoT rule (and only it) to invoke
        # the function.
        trigger_fn.add_permission(
            "GrantIoTRule",
            principal=iam.ServicePrincipal("iot.amazonaws.com"),
            source_arn=topic_rule.attr_arn,
        )
Ejemplo n.º 28
0
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        """Deploy an inline-code Lambda behind an API Gateway REST endpoint."""
        super().__init__(scope, construct_id, **kwargs)

        # Import the handler source for inline deployment. Fail fast on I/O
        # errors: the original print-and-continue left `function_body`
        # unbound, which crashed below with an unrelated NameError.
        try:
            with open("serverless_stack/functions/function-helloworld.py",
                      mode="r") as file:
                function_body = file.read()
        except OSError as err:
            raise RuntimeError(
                "Unable to read serverless_stack/functions/"
                "function-helloworld.py") from err

        #lambda
        function_01 = aws_lambda.Function(
            self,
            "lambdafunction01",
            function_name="LambdaTestAPIGW",
            runtime=aws_lambda.Runtime.PYTHON_3_6,
            handler="index.lambda_handler",
            code=aws_lambda.InlineCode(function_body),
            timeout=core.Duration.seconds(5),
            reserved_concurrent_executions=1,
            environment={
                'LOG_LEVEL': 'INFO',
                # NOTE(review): 'Enviroment' is misspelled, but the handler
                # may read it under this exact key — confirm before renaming.
                'Enviroment': 'Production'
            })

        # Own the function's log group so it is removed with the stack.
        log_group01 = aws_logs.LogGroup(
            self,
            "cloudwatchlog01",
            log_group_name=f"/aws/lambda/{function_01.function_name}",
            retention=aws_logs.RetentionDays.ONE_DAY,
            removal_policy=core.RemovalPolicy.DESTROY)

        # API Gateway proxying all requests to the Lambda.
        api_gateway_function = aws_apigateway.LambdaRestApi(
            self, "apigateway01", handler=function_01)

        #output
        apigw_output = core.CfnOutput(self,
                                      "apigwOutput",
                                      value=f"{api_gateway_function.url}",
                                      description="web url for apigw")
Ejemplo n.º 29
0
    def __init__(self, scope: core.Stack, id: str, vpc, cluster,
                 **kwargs) -> None:
        """Provision ECS-side resources for Jenkins worker containers.

        Builds the worker Docker image, a security group for leader access,
        the IAM execution/task roles, and a CloudWatch log group plus stream.
        All resources are exposed as instance attributes for sibling stacks.

        :param vpc: VPC the worker security group is created in — presumably
            an ``ec2.Vpc``; confirm against the caller.
        :param cluster: ECS cluster reference, stored for later use.
        """
        super().__init__(scope, id, **kwargs)
        self.vpc = vpc
        self.cluster = cluster

        # Building a custom image for jenkins leader.
        self.container_image = ecr.DockerImageAsset(
            self, "JenkinsWorkerDockerImage", directory='./docker/worker/')

        # Security group to connect workers to leader
        self.worker_security_group = ec2.SecurityGroup(
            self,
            "WorkerSecurityGroup",
            vpc=self.vpc,
            description="Jenkins Worker access to Jenkins leader",
        )

        # IAM execution role for the workers to pull from ECR and push to CloudWatch logs
        self.worker_execution_role = iam.Role(
            self,
            "WorkerExecutionRole",
            assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'),
        )

        # AWS-managed policy grants the ECR pull + CloudWatch Logs permissions.
        self.worker_execution_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AmazonECSTaskExecutionRolePolicy'))

        # Task role for worker containers - add to this role for any aws resources that jenkins requires access to
        self.worker_task_role = iam.Role(
            self,
            "WorkerTaskRole",
            assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'),
        )

        # Create log group for workers to log (one-day retention keeps cost low)
        self.worker_logs_group = logs.LogGroup(
            self, "WorkerLogGroup", retention=logs.RetentionDays.ONE_DAY)

        # Create log stream for worker log group
        self.worker_log_stream = logs.LogStream(
            self, "WorkerLogStream", log_group=self.worker_logs_group)
Ejemplo n.º 30
0
def create_ecs(self, vpc, sg_dictionary, repository):
    """Create the demo ECS cluster, Fargate task definition, and container.

    :param vpc: VPC the cluster is placed in.
    :param sg_dictionary: security-group mapping — unused in the visible code;
        presumably consumed further down, verify against the full function.
    :param repository: ECR repository supplying the container image.
    """
    # Cluster
    cluster = _ecs.Cluster(
        self, 'Cluster',
        cluster_name='DEMO-CLUSTER',
        vpc=vpc
    )

    # Role(task execution)
    execution_role = _iam.Role(
        self, 'ExecutionRole',
        role_name='DEMO-TASK-EXECUTION-ROLE',
        assumed_by=_iam.ServicePrincipal('ecs-tasks.amazonaws.com')
    )
    execution_role.add_managed_policy(_iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AmazonECSTaskExecutionRolePolicy'))

    # TaskDefinition (Fargate: 2 vCPU / 8 GiB, awsvpc networking)
    # NOTE(review): the execution role doubles as the task role — confirm the
    # workload does not need separate task permissions.
    task_def = _ecs.TaskDefinition(
        self, 'TaskDefinition',
        compatibility=_ecs.Compatibility.FARGATE,
        cpu='2048',
        memory_mib='8192',
        network_mode=_ecs.NetworkMode.AWS_VPC,
        execution_role=execution_role,
        family='DEMO-TASK',
        task_role=execution_role,
    )

    # Container: image from ECR, logs streamed to a dedicated log group that
    # retains entries indefinitely.
    container = task_def.add_container(
        id='DEMO-CONTAINER',
        image=_ecs.ContainerImage.from_ecr_repository(repository),
        logging=_ecs.LogDriver.aws_logs(
            stream_prefix='ecs',
            log_group=_logs.LogGroup(
                self, 'LogGroup',
                log_group_name='/ecs/'+'DEMO-TASK',
                retention=_logs.RetentionDays.INFINITE,
            )
        )
    )
    # Expose the application port inside the awsvpc network.
    container.add_port_mappings(_ecs.PortMapping(container_port=8080))