Example #1
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        self.event_bus = EventBus(scope=self,
                                  id='CustomEventBus',
                                  event_bus_name='CustomEventBus')

        self.source = Function(
            scope=self,
            id='SourceFunction',
            function_name='SourceFunction',
            code=Code.from_asset(path='./code_source/'),
            handler='index.handler',
            runtime=Runtime.PYTHON_3_6,
        )

        self.source.add_to_role_policy(statement=PolicyStatement(
            actions=['events:PutEvents'],
            resources=[self.event_bus.event_bus_arn]))
        """
        Define rule.
        """

        self.rule = Rule(
            scope=self,
            id='EventBusRule',
            description='Sample description.',
            enabled=True,
            event_bus=self.event_bus,
            event_pattern=EventPattern(detail={
                'Domain': ["MedInfo"],
                'Reason': ["InvokeTarget"]
            }),
            rule_name='EventBusRule',
        )
        """
        Add target.
        """

        self.target = Function(
            scope=self,
            id='TargetFunction',
            function_name='TargetFunction',
            code=Code.from_asset(path='./code_target/'),
            handler='index.handler',
            runtime=Runtime.PYTHON_3_6,
        )

        rule_target: IRuleTarget = LambdaFunction(handler=self.target)
        self.rule.add_target(target=rule_target)
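For the rule above to fire, the source function has to publish events whose detail carries the Domain and Reason fields from the EventPattern. A minimal sketch of what ./code_source/index.py could look like; the bus name comes from the construct above, while the Source and DetailType values are purely illustrative:

# Hypothetical ./code_source/index.py
import json

import boto3

events = boto3.client('events')


def handler(event, context):
    events.put_events(Entries=[{
        'EventBusName': 'CustomEventBus',
        'Source': 'custom.source',          # illustrative
        'DetailType': 'CustomDetailType',   # illustrative
        'Detail': json.dumps({'Domain': 'MedInfo', 'Reason': 'InvokeTarget'}),
    }])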
Example #2
 def __init__(self, scope: Stack):
     super().__init__(
         scope=scope,
         id=f'{TestingStack.global_prefix()}FunctionWithUnitTests',
         code=Code.from_asset(root),
         handler='handler.handler',
         runtime=Runtime.PYTHON_3_8,
         timeout=Duration.minutes(5),
         memory_size=512,
         layers=[
             Layer(
                 scope=scope,
                 name=
                 f'{TestingStack.global_prefix()}TestingLayerWithUnitTests',
                 dependencies={
                     # These dependencies are required for running unit tests inside lambda functions.
                     # Pytest is used for running actual unit tests.
                     'pytest':
                     PackageVersion.from_string_version('6.2.5'),
                     # Pook is used for HTTP mocking, therefore it is also needed here.
                     'pook':
                     PackageVersion.from_string_version('1.0.1'),
                      # Not sure about this dependency; the Lambda runtime throws errors if it's missing.
                     'aws-cdk.core':
                     PackageVersion.from_string_version('1.99.0'),
                      # 'importlib-resources' should come in transitively with 'pook'
                      # (via 'jsonschema'), but for some reason it does not.
                      # Tests would fail with an import error otherwise.
                     'importlib-resources':
                     PackageVersion.from_string_version('5.4.0')
                 })
         ])
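The layer above only ships pytest (plus pook and friends); the function itself still has to start the test run. A minimal sketch of a handler that does so, assuming the tests are packaged next to the handler module:

# Hypothetical handler.py — runs the bundled unit tests inside the Lambda runtime.
import pytest


def handler(event, context):
    # pytest.main returns a non-zero exit code when any test fails.
    exit_code = pytest.main(['-x', 'tests'])
    return {'exit_code': int(exit_code)}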
Example #3
    def init_lambda(self):

        tmp_dir = install_lambda_code_requirements()

        lambda_code = Code.from_asset(str(tmp_dir),
                                      exclude=[
                                          ".env",
                                          "__main*",
                                          "*.dist-info",
                                          "bin",
                                          "requirements.txt",
                                      ])

        lambda_function = Function(self,
                                   "lambda",
                                   code=lambda_code,
                                   handler="main.handler",
                                   runtime=Runtime.PYTHON_3_8)

        lambda_function.role.assume_role_policy.add_statements(
            PolicyStatement(
                actions=["sts:AssumeRole"],
                principals=[ServicePrincipal("edgelambda.amazonaws.com")]))

        version = Version(self, "version", lambda_=lambda_function)

        apply_removal_policy(lambda_function, version, lambda_function.role)

        return version
Example #4
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 lambda_context: str, **kwargs) -> None:
        super().__init__(scope, construct_id)

        fn = dict(self.node.try_get_context(lambda_context))

        lambda_fn = Function(
            self,
            fn["fn_name"],
            function_name=fn["fn_name"],
            runtime=Runtime.PYTHON_3_8,
            handler=fn["fn_handler"],
            code=Code.from_asset(fn["fn_path"]),
            tracing=Tracing.ACTIVE,
            current_version_options={
                "removal_policy": cdk.RemovalPolicy.RETAIN
            },
            retry_attempts=fn["fn_retry_attempts"],
            timeout=Duration.seconds(fn["fn_timeout"]),
            reserved_concurrent_executions=fn["fn_reserved_concurrency"])

        lambda_fn_alias = lambda_fn.current_version.add_alias(fn["fn_alias"])

        # # Outputs

        cdk.CfnOutput(self,
                      fn["fn_name"] + 'Arn',
                      value=lambda_fn.function_arn)

        self._function = lambda_fn
        self._function_alias = lambda_fn_alias
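The fn dict above is read straight from CDK context, so every key the construct uses (fn_name, fn_handler, fn_path, fn_alias, fn_retry_attempts, fn_timeout, fn_reserved_concurrency) must exist under the chosen context key. A sketch of how that context could be supplied; in practice it usually lives in cdk.json, and all values here are placeholders:

# Hypothetical context matching the keys used above.
import aws_cdk.core as cdk

app = cdk.App(context={
    'my_lambda': {
        'fn_name': 'MyFunction',
        'fn_handler': 'index.handler',
        'fn_path': './lambdas/my_function',
        'fn_alias': 'live',
        'fn_retry_attempts': 2,
        'fn_timeout': 30,
        'fn_reserved_concurrency': 5,
    },
})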
Example #5
    def _create_layers(self):
        if os.path.isdir(BUILD_FOLDER):
            shutil.rmtree(BUILD_FOLDER)
        os.mkdir(BUILD_FOLDER)

        for layer in os.listdir(LAYERS_DIR):
            layer_folder = os.path.join(LAYERS_DIR, layer)
            build_folder = os.path.join(BUILD_FOLDER, layer)
            shutil.copytree(layer_folder, build_folder)

            requirements_path = os.path.join(build_folder, "requirements.txt")

            if os.path.isfile(requirements_path):
                packages_folder = os.path.join(build_folder, "python", "lib", "python3.8", "site-packages")
                # print(f"Installing layer requirements to target: {os.path.abspath(packages_folder)}")
                subprocess.check_output(["pip", "install", "-r", requirements_path, "-t", packages_folder])
                clean_pycache()

            self.layers[layer] = LayerVersion(
                self,
                layer,
                layer_version_name=f"movies-{layer}",
                code=Code.from_asset(path=build_folder),
                compatible_runtimes=[Runtime.PYTHON_3_8],
            )
Example #6
    def __init__(self, scope: Construct, id: str, *, deployment: Deployment,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        Tags.of(self).add("Application", self.application_name)
        Tags.of(self).add("Deployment", deployment.value)

        func = lambda_edge.create_function(
            self,
            f"Preview{deployment.value}IndexRedirect",
            runtime=Runtime.NODEJS_10_X,
            handler="index.handler",
            code=Code.from_asset("./lambdas/preview-redirect"),
        )

        s3_cloud_front = S3CloudFront(
            self,
            "S3CloudFront",
            subdomain_name=self.subdomain_name,
            error_folder="/errors",
            lambda_function_associations=[
                LambdaFunctionAssociation(
                    event_type=LambdaEdgeEventType.ORIGIN_REQUEST,
                    lambda_function=func,
                ),
            ],
        )

        S3CloudFrontPolicy(
            self,
            "S3cloudFrontPolicy",
            s3_cloud_front=s3_cloud_front,
        )
Example #7
    def create_lambda(self, envs: EnvSettings):
        is_app_only = self.node.try_get_context("is_app_only")

        if is_app_only == "true":
            code = Code.from_asset(
                path="../backend/functions/image_resize/.serverless/main.zip")
        else:
            code = Code.from_cfn_parameters()

        function = Function(
            self,
            "image-resize-lambda",
            function_name=f"{envs.project_name}-image-resize",
            code=code,
            handler="index.handler",
            runtime=Runtime.NODEJS_12_X,
            memory_size=512,
            timeout=Duration.seconds(30),
            tracing=Tracing.ACTIVE,
        )

        api_gateway = LambdaRestApi(
            self,
            "ImageResizeLambdaApi",
            rest_api_name=f"{envs.project_name}-image-resize",
            handler=function)

        return function, code, api_gateway
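When is_app_only is not "true", the function body is not an asset at all: Code.from_cfn_parameters() creates CloudFormation parameters for an S3 bucket and key that a deployment pipeline fills in later. A hedged sketch of how the returned code object is typically consumed; the property names follow the aws_lambda.CfnParametersCode API, and the pipeline wiring itself is omitted:

# Hypothetical consumer of the returned CfnParametersCode
# (only valid when code was created via Code.from_cfn_parameters()).
function, code, api_gateway = self.create_lambda(envs)

# The generated parameter names can be handed to whatever deploys the stack,
# e.g. a CloudFormation deploy action that supplies the built artifact's S3 location.
bucket_param = code.bucket_name_param
key_param = code.object_key_param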
Example #8
    def __init__(self, scope: Construct, id: str, *, deployment: Deployment,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        Tags.of(self).add("Application", self.application_name)
        Tags.of(self).add("Deployment", deployment.value)

        bucket_site = Bucket(
            self,
            "Site",
            block_public_access=BlockPublicAccess.BLOCK_ALL,
        )

        bucket_access_logs = Bucket(
            self,
            "AccessLogs",
            encryption=BucketEncryption.S3_MANAGED,
            block_public_access=BlockPublicAccess.BLOCK_ALL,
        )

        for subdomain_name in self.subdomain_names:
            func_version = lambda_edge.create_function(
                self,
                f"Redirect-{subdomain_name}-{deployment.value}",
                runtime=Runtime.NODEJS_10_X,
                handler="index.handler",
                code=Code.from_asset(f"./lambdas/redirect-{subdomain_name}"),
            )

            if subdomain_name == "grfsearch":
                S3CloudFrontV2(
                    self,
                    f"S3CloudFront-{subdomain_name}",
                    subdomain_name=subdomain_name,
                    bucket_site=bucket_site,
                    bucket_access_logs=bucket_access_logs,
                    edge_lambdas=[
                        EdgeLambda(
                            event_type=LambdaEdgeEventType.ORIGIN_REQUEST,
                            function_version=func_version,
                        ),
                    ],
                    forward_query_string=True,
                    forward_query_string_cache_keys=["do", "q"],
                )
            else:
                S3CloudFront(
                    self,
                    f"S3CloudFront-{subdomain_name}",
                    subdomain_name=subdomain_name,
                    bucket_site=bucket_site,
                    bucket_access_logs=bucket_access_logs,
                    lambda_function_associations=[
                        LambdaFunctionAssociation(
                            event_type=LambdaEdgeEventType.ORIGIN_REQUEST,
                            lambda_function=func_version,
                        ),
                    ],
                )
Example #9
    def __code(self) -> Code:
        """
        Gets (and caches) source code for the lambda function.

        :return: Lambda function source code (as an asset).
        """
        from .source import root
        return Code.from_asset(root)
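The docstring above mentions caching, which is presumably handled by a decorator not shown in the snippet. A minimal sketch of one way to get that behaviour with the standard library; the module-level shape is an assumption:

# Hypothetical caching variant: the asset is constructed once and reused.
import functools

from aws_cdk.aws_lambda import Code


@functools.lru_cache(maxsize=1)
def lambda_code(root: str) -> Code:
    return Code.from_asset(root)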
Example #10
    def _create_lambdas(self):
        clean_pycache()

        for root, dirs, files in os.walk(LAMBDAS_DIR):
            for f in files:
                if f != "__init__.py":
                    continue

                parent_folder = os.path.basename(os.path.dirname(root))
                lambda_folder = os.path.basename(root)
                name = f"{parent_folder}-{lambda_folder}"
                lambda_config = self.lambdas_config[name]

                layers = []
                for layer_name in lambda_config["layers"]:
                    layers.append(self.layers[layer_name])

                lambda_role = Role(
                    self,
                    f"{name}_role",
                    assumed_by=ServicePrincipal(service="lambda.amazonaws.com")
                )
                for policy in lambda_config["policies"]:
                    lambda_role.add_to_policy(policy)
                lambda_role.add_managed_policy(
                    ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole"))

                lambda_args = {
                    "code": Code.from_asset(root),
                    "handler": "__init__.handle",
                    "runtime": Runtime.PYTHON_3_8,
                    "layers": layers,
                    "function_name": name,
                    "environment": lambda_config["variables"],
                    "role": lambda_role,
                    "timeout": Duration.seconds(lambda_config["timeout"]),
                    "memory_size": lambda_config["memory"],
                }
                if "concurrent_executions" in lambda_config:
                    lambda_args["reserved_concurrent_executions"] = lambda_config["concurrent_executions"]

                self.lambdas[name] = Function(self, name, **lambda_args)

        self.lambdas["sqs_handlers-post_anime"].add_event_source(SqsEventSource(self.post_anime_queue))

        Rule(
            self,
            "titles_updater",
            schedule=Schedule.cron(hour="2", minute="10"),
            targets=[LambdaFunction(self.lambdas["crons-titles_updater"])]
        )
        Rule(
            self,
            "episodes_updater",
            schedule=Schedule.cron(hour="4", minute="10"),
            targets=[LambdaFunction(self.lambdas["crons-episodes_updater"])]
        )
Example #11
 def layer_for(self, name: str, base: str, runtimes: List[Runtime]):
     bundling = self._get_bundling(
         base, source_path="python/lib/python3.8/site-packages")
     code = Code.from_asset(str(self.source_path), bundling=bundling)
     layer = LayerVersion(self,
                          name,
                          code=code,
                          compatible_runtimes=runtimes)
     return layer
Example #12
    def __init__(self, scope: Stack, name: str):
        self.__scope = scope

        super().__init__(
            scope=scope,
            id=name,
            code=Code.from_asset(root),
            compatible_runtimes=self.runtimes(),
            layer_version_name=name,
        )
Example #13
def mock_layer_init(
    self, scope: constructs.Construct, id: builtins.str, *, code: Code, **kwargs
) -> None:
    # Overriding the layer code avoids building with Docker (a long-running operation).
    # The runtime list is overridden as well, to match the runtimes used above.
    with TemporaryDirectory() as tmpdirname:
        kwargs["code"] = Code.from_asset(path=tmpdirname)
        kwargs["compatible_runtimes"] = [Runtime.PYTHON_3_7]
        props = LayerVersionProps(**kwargs)
        jsii.create(LayerVersion, self, [scope, id, props])
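A sketch of how a mock like this is usually applied, so that stack unit tests skip the Docker bundling step; the patch target and test body are assumptions:

# Hypothetical test setup using the mock above.
from unittest.mock import patch

from aws_cdk.aws_lambda import LayerVersion


def test_stack_synthesizes():
    with patch.object(LayerVersion, '__init__', mock_layer_init):
        # Build and synthesize the stack under test here;
        # every layer now points at an empty temporary directory.
        ...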
Example #14
 def deploy_aws_ecs_public_dns(self):
     code_path = join(dirname(dirname(__file__)), 'build',
                      'aws-ecs-public-dns.zip')
     func = Function(self._stack,
                     'public_dns',
                     runtime=Runtime.NODEJS_12_X,
                     handler='src/update-task-dns.handler',
                     memory_size=128,
                     code=Code.from_asset(path=code_path))
     self._tag_it(func)
     func.add_to_role_policy(
         statement=self.get_public_dns_policy_statement())
     self.create_event_rule(func)
Example #15
    def create_dependencies_layer(self) -> LayerVersion:
        """
        Creates a lambda layer containing the external packages (pyotp, requests) which are
        required for the secret rotation
        """
        requirements_file = 'lambda_layers/external_dependencies/requirements.txt'
        output_dir = 'lambda_layers/external_dependencies'

        subprocess.check_call(
            f'pip3 install --upgrade -r {requirements_file} -t {output_dir}/python'
            .split())

        layer_id = 'external-dependencies'
        layer_code = Code.from_asset(output_dir)

        return LayerVersion(self, layer_id, code=layer_code)
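The returned layer is then attached to whichever rotation function needs pyotp and requests. A short sketch of a consumer inside the same stack class; the function name, handler and path are placeholders:

# Hypothetical consumer of create_dependencies_layer().
from aws_cdk.aws_lambda import Code, Function, Runtime


def create_rotation_function(self) -> Function:
    return Function(
        self,
        'SecretRotationFunction',
        runtime=Runtime.PYTHON_3_8,
        handler='rotate.handler',
        code=Code.from_asset('lambda_functions/secret_rotation'),
        layers=[self.create_dependencies_layer()],
    )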
Example #16
    def __init__(
        self,
        scope: Construct,
        id: str,
        *,
        deployment: Deployment,
        additional_fqdns: Optional[List[str]] = None,
        **kwargs,
    ) -> None:
        super().__init__(scope, id, **kwargs)

        Tags.of(self).add("Application", self.application_name)
        Tags.of(self).add("Deployment", deployment.value)

        func = lambda_edge.create_function(
            self,
            f"BananasCdnRedirect{deployment.value}",
            runtime=Runtime.NODEJS_10_X,
            handler="index.handler",
            code=Code.from_asset("./lambdas/bananas-cdn"),
        )

        s3_cloud_front = S3CloudFront(
            self,
            "S3CloudFront",
            subdomain_name=self.subdomain_name,
            error_folder="/errors",
            lambda_function_associations=[
                LambdaFunctionAssociation(
                    event_type=LambdaEdgeEventType.ORIGIN_REQUEST,
                    lambda_function=func,
                ),
            ],
            price_class=PriceClass.PRICE_CLASS_ALL,
            additional_fqdns=additional_fqdns,
            viewer_protocol_policy=ViewerProtocolPolicy.ALLOW_ALL,  # OpenTTD client doesn't support HTTPS
        )
        self.bucket = s3_cloud_front.bucket_site

        S3CloudFrontPolicy(
            self,
            "S3cloudFrontPolicy",
            s3_cloud_front=s3_cloud_front,
            with_s3_get_object_access=True,
        )
Example #17
    def __init__(self, scope: core.Construct, construct_id: str,
                 lambda_context: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        fn = dict(self.node.try_get_context(lambda_context))

        # lambda dlq
        lambda_fn_dlq = _sqs.Queue(self,
                                   fn["fn_dlq_name"],
                                   queue_name=fn["fn_dlq_name"])

        lambda_fn = Function(
            self,
            fn["fn_name"],
            function_name=fn["fn_name"],
            runtime=Runtime.PYTHON_3_8,
            handler=fn["fn_handler"],
            code=Code.from_asset(fn["fn_path"]),
            tracing=Tracing.ACTIVE,
            current_version_options={
                "removal_policy": core.RemovalPolicy.RETAIN
            },
            environment={
                "ENVIRONMENT_VALUE": "DUMMY_VALUE",
            },
            dead_letter_queue=lambda_fn_dlq,
            retry_attempts=fn["fn_retry_attempts"],
            timeout=Duration.seconds(fn["fn_timeout"]),
            reserved_concurrent_executions=fn["fn_reserved_concurrency"])

        lambda_fn_alias = lambda_fn.current_version.add_alias(fn["fn_alias"])

        lambda_fn_dlq.grant_send_messages(lambda_fn)

        # # Outputs

        core.CfnOutput(self,
                       fn["fn_name"] + 'Arn',
                       value=(lambda_fn.function_arn))

        self._function = lambda_fn
        self._function_alias = lambda_fn_alias
        self._function_dlq = lambda_fn_dlq
Example #18
    def functions_for(
        self,
        name,
        base,
        handlers,
        libs=None,
        timeout=Duration.minutes(5),
        runtime=Runtime.PYTHON_3_8,
        layers=None,
    ) -> Dict[str, Function]:
        if isinstance(handlers, str):
            handlers = [handlers]
        if not isinstance(handlers, list):
            raise ValueError("handlers must be a string or a list of handlers")
        if isinstance(libs, str):
            libs = [libs]
        if isinstance(layers, str):
            layers = [layers]
        if libs and not isinstance(libs, list):
            raise ValueError("libs must be a string or a list of libraries")

        bundling = self._get_bundling(base, libs=libs)
        code = Code.from_asset(str(self.source_path), bundling=bundling)
        role = self.build_lambda_role(name)
        functions = {}
        for handler in handlers:
            func_name = name + handler.split(".")[0].replace(
                "_", " ").title().replace(" ", "").replace("Handler", "")
            functions.update({
                func_name:
                Function(
                    self,
                    func_name,
                    handler=handler,
                    code=code,
                    runtime=runtime,
                    timeout=timeout,
                    role=role,
                    layers=layers,
                    environment={"LOG_LEVEL": self.log_level},
                )
            })
        return functions
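The function names produced above are derived purely from the handler string. A worked example of that derivation (plain string manipulation, no CDK involved):

# Worked example of the func_name derivation used above.
name = 'Users'
handler = 'sync_users_handler.handler'
func_name = name + handler.split('.')[0].replace('_', ' ').title().replace(' ', '').replace('Handler', '')
assert func_name == 'UsersSyncUsers'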
Example #19
    def __init__(self,
                 scope: Stack,
                 name: str,
                 boto3_version: Optional[str] = None):
        install_command = [
            'pip install -r requirements.txt -t /tmp/asset-output/python',
        ]

        if boto3_version:
            install_command.append(
                f'pip install boto3=={boto3_version} -t /tmp/asset-output/python'
            )

        build_command = [
            # Copy.
            'cp -R /tmp/asset-output/. /asset-output/.',
            'cp -R /asset-input/. /asset-output/.',

            # Cleanup.
            'find /asset-output/ -type f -name "*.py[co]" -delete',
            'find /asset-output/ -type d -name "__pycache__" -exec rm -rf {} +',
            'find /asset-output/ -type d -name "*.dist-info" -exec rm -rf {} +',
            'find /asset-output/ -type d -name "*.egg-info" -exec rm -rf {} +',

            # Validation.
            'ls -la /asset-output/python/.',
            'find /asset-output/ -type f -print0 | sort -z | xargs -0 sha1sum | sha1sum'
        ]

        super().__init__(
            scope=scope,
            id=name,
            layer_version_name=name,
            code=Code.from_asset(
                self.get_source_path(),
                asset_hash_type=AssetHashType.BUNDLE,
                bundling=BundlingOptions(
                    image=BundlingDockerImage.from_registry('python:3.9'),
                    command=[
                        'bash', '-c',
                        ' && '.join(install_command + build_command)
                    ])),
            compatible_runtimes=self.runtimes())
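get_source_path() and runtimes() are not shown above; they are supplied by the concrete layer class. A hedged sketch of how a subclass could implement them; the class and base-class names, path and runtime list are all assumptions:

# Hypothetical concrete layer built on the bundling base class above.
import os
from typing import List

from aws_cdk.aws_lambda import Runtime


class MyProjectLayer(Boto3LayerBase):  # 'Boto3LayerBase' stands in for the class defined above
    @staticmethod
    def get_source_path() -> str:
        return os.path.join(os.path.dirname(__file__), 'layer_source')

    @staticmethod
    def runtimes() -> List[Runtime]:
        return [Runtime.PYTHON_3_9]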
Example #20
    def _create_lambdas(self):
        for root, dirs, files in os.walk(LAMBDAS_DIR):
            for f in files:
                if f != "__init__.py":
                    continue

                parent_folder = os.path.basename(os.path.dirname(root))
                lambda_folder = os.path.basename(root)
                name = f"{parent_folder}-{lambda_folder}"
                lambda_config = self.lambdas_config[name]

                layers = []
                for layer_name in lambda_config["layers"]:
                    layers.append(self.layers[layer_name])

                lambda_role = Role(self,
                                   f"{name}_role",
                                   assumed_by=ServicePrincipal(
                                       service="lambda.amazonaws.com"))
                for policy in lambda_config["policies"]:
                    lambda_role.add_to_policy(policy)
                lambda_role.add_managed_policy(
                    ManagedPolicy.from_aws_managed_policy_name(
                        "service-role/AWSLambdaBasicExecutionRole"))

                self.lambdas[name] = Function(
                    self,
                    name,
                    code=Code.from_asset(root),
                    handler="__init__.handle",
                    runtime=Runtime.PYTHON_3_8,
                    layers=layers,
                    function_name=name,
                    environment=lambda_config["variables"],
                    role=lambda_role,
                    timeout=Duration.seconds(lambda_config["timeout"]),
                    memory_size=lambda_config["memory"],
                )

        Rule(self,
             "update_eps",
             schedule=Schedule.cron(hour="2", minute="10"),
             targets=[LambdaFunction(self.lambdas["cron-update_eps"])])
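The walk above expects LAMBDAS_DIR/<parent>/<lambda>/__init__.py plus a matching lambdas_config entry keyed by "<parent>-<lambda>". A sketch of what one entry could look like; the keys are the ones read by the code above, the values are placeholders:

# Hypothetical configuration entry consumed by _create_lambdas above.
from aws_cdk.aws_iam import PolicyStatement

lambdas_config = {
    'cron-update_eps': {
        'layers': ['common'],
        'policies': [
            PolicyStatement(actions=['dynamodb:Query'], resources=['*']),
        ],
        'variables': {'TABLE_NAME': 'episodes'},
        'timeout': 60,
        'memory': 256,
    },
}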
Example #21
    def _create_lambda_fn(self, envs: EnvSettings, memory_size: int,
                          queue: Queue):
        is_app_only = self.node.try_get_context("is_app_only")

        if is_app_only == "true":
            code = Code.from_asset(
                path="../backend/functions/worker/.serverless/main.zip")
        else:
            code = Code.from_cfn_parameters()

        function = Function(
            self,
            f"data-processing-worker-{memory_size}",
            function_name=f"{envs.project_name}-data-processing-{memory_size}",
            code=code,
            runtime=Runtime.PYTHON_3_8,
            handler="handler.main",
            environment={
                "AWS_STORAGE_BUCKET_NAME":
                self.app_bucket.bucket_name,
                "IMAGE_SCRAPING_FETCH_TIMEOUT":
                "15",
                "AWS_IMAGE_STORAGE_BUCKET_NAME":
                self.resize_lambda_image_bucket.bucket_name,
                "AWS_IMAGE_STATIC_URL":
                self.resize_lambda_image_bucket.bucket_website_url,
                "BACKEND_URL":
                self.backend_url,
                "LAMBDA_AUTH_TOKEN":
                self.lambda_auth_token.secret_value.to_string(),
            },
            memory_size=memory_size,
            timeout=Duration.seconds(300),
            tracing=Tracing.ACTIVE,
        )

        function.add_event_source(SqsEventSource(queue, batch_size=1))

        self.app_bucket.grant_read_write(function.role)
        self.resize_lambda_image_bucket.grant_read_write(function.role)

        return function, code
Example #22
    def __init__(self,
                 scope: Construct,
                 id: str,
                 *,
                 deployment: Deployment,
                 additional_fqdns: Optional[List[str]] = None,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        Tags.of(self).add("Application", self.application_name)
        Tags.of(self).add("Deployment", deployment.value)

        func = lambda_edge.create_function(
            self,
            "CdnIndexRedirect",
            runtime=Runtime.NODEJS_10_X,
            handler="index.handler",
            code=Code.from_asset("./lambdas/index-redirect"),
        )

        s3_cloud_front = S3CloudFront(
            self,
            "S3CloudFront",
            subdomain_name=self.subdomain_name,
            error_folder="/errors",
            lambda_function_associations=[
                LambdaFunctionAssociation(
                    event_type=LambdaEdgeEventType.ORIGIN_REQUEST,
                    lambda_function=func,
                ),
            ],
            additional_fqdns=additional_fqdns,
            price_class=PriceClass.PRICE_CLASS_ALL,
        )

        S3CloudFrontPolicy(
            self,
            "S3cloudFrontPolicy",
            s3_cloud_front=s3_cloud_front,
            with_s3_get_object_access=True,
        )
Example #23
    def __init__(self, scope: Stack, name: str):
        self.__scope = scope

        super().__init__(
            scope=scope,
            id=name,
            code=Code.from_asset(
                self.get_source_path(),
                asset_hash_type=AssetHashType.BUNDLE,
                bundling=BundlingOptions(
                    image=BundlingDockerImage.from_registry('python:3.9'),
                    command=[
                        'bash', '-c',
                        ('pip install -r requirements.txt -t /asset-output/python && '
                         'find /asset-output -type f -name "*.py[co]" -delete && '
                         'find /asset-output -type d -name "__pycache__" -delete'
                         )
                    ])),
            compatible_runtimes=self.runtimes(),
            layer_version_name=name,
        )
Example #24
    def create_ecs_lambda(self, cluster: ICluster,
                          auto_scaling_group: AutoScalingGroup):
        lambda_func = Function(
            self,
            "LambdaECS",
            code=Code.from_asset("./lambdas/nlb-ecs"),
            handler="index.lambda_handler",
            runtime=Runtime.PYTHON_3_8,
            timeout=Duration.seconds(30),
            environment={
                "AUTO_SCALING_GROUP_NAME":
                auto_scaling_group.auto_scaling_group_name,
            },
        )
        lambda_func.add_to_role_policy(
            PolicyStatement(
                actions=[
                    "autoscaling:DescribeAutoScalingGroups",
                    "ssm:SendCommand",
                    "ssm:GetCommandInvocation",
                ],
                resources=[
                    "*",
                ],
            ))

        Rule(
            self,
            "ECS",
            event_pattern=EventPattern(
                detail_type=["ECS Task State Change"],
                detail={
                    "clusterArn": [cluster.cluster_arn],
                },
                source=["aws.ecs"],
            ),
            targets=[LambdaFunction(lambda_func)],
        )
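The rule above delivers "ECS Task State Change" events from the cluster directly to the function, so the handler mostly inspects event['detail'] and then talks to the Auto Scaling group named in its environment. A minimal, hypothetical skeleton of ./lambdas/nlb-ecs/index.py:

# Hypothetical ./lambdas/nlb-ecs/index.py
import os

import boto3

autoscaling = boto3.client('autoscaling')


def lambda_handler(event, context):
    detail = event['detail']
    print(f"Task {detail['taskArn']} changed state to {detail['lastStatus']}")

    group_name = os.environ['AUTO_SCALING_GROUP_NAME']
    groups = autoscaling.describe_auto_scaling_groups(
        AutoScalingGroupNames=[group_name])
    # ...inspect the instances and reconcile the NLB targets (omitted)...
    return groups['AutoScalingGroups'][0]['AutoScalingGroupName']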
Example #25
    def __init__(
            self,
            stack: core.Stack,
            prefix: str,
            secret: aws_secretsmanager.Secret,
            vpc_parameters: VPCParameters,
            database: Union[aws_rds.CfnDBInstance, aws_rds.CfnDBCluster],
            kms_key: Optional[aws_kms.IKey] = None,
    ) -> None:
        """
        Constructor.

        :param stack: A stack in which resources should be created.
        :param prefix: A prefix to give for every resource.
        :param secret: A secret instance which the lambda function should be able to access.
        :param vpc_parameters: VPC parameters for resource (e.g. lambda rotation function) configuration.
        :param database: The RDS instance or cluster whose master password should be rotated.
        :param kms_key: Custom or managed KMS key for secret encryption which the
        lambda function should be able to access.
        """
        super().__init__()

        self.__prefix = prefix + 'SecretRotation'

        # Read more about the permissions required to successfully rotate a secret:
        # https://docs.aws.amazon.com/secretsmanager/latest/userguide//rotating-secrets-required-permissions.html
        rotation_lambda_role_statements = [
            # We enforce lambdas to run in a VPC.
            # Therefore lambdas need some network interface permissions.
            aws_iam.PolicyStatement(
                actions=[
                    'ec2:CreateNetworkInterface',
                    'ec2:ModifyNetworkInterface',
                    'ec2:DeleteNetworkInterface',
                    'ec2:AttachNetworkInterface',
                    'ec2:DetachNetworkInterface',
                    'ec2:DescribeNetworkInterfaces',
                    "logs:CreateLogGroup",
                    "logs:CreateLogStream",
                    "logs:PutLogEvents",
                ],
                effect=aws_iam.Effect.ALLOW,
                resources=['*']
            ),
            # Lambda needs to call secrets manager to get secret value in order to update database password.
            aws_iam.PolicyStatement(
                actions=[
                    "secretsmanager:DescribeSecret",
                    "secretsmanager:GetSecretValue",
                    "secretsmanager:PutSecretValue",
                    "secretsmanager:UpdateSecretVersionStage"
                ],
                effect=aws_iam.Effect.ALLOW,
                resources=[secret.secret_arn]
            ),
            # 'secretsmanager:GetRandomPassword' is not tied to a specific secret,
            # so it is granted on all resources; this does not impose any security risk.
            aws_iam.PolicyStatement(
                actions=[
                    "secretsmanager:GetRandomPassword"
                ],
                effect=aws_iam.Effect.ALLOW,
                resources=['*']
            )
        ]

        if kms_key is not None:
            rotation_lambda_role_statements.append(
                # Secrets may be KMS encrypted.
                # Therefore the lambda function should be able to get this value.
                aws_iam.PolicyStatement(
                    actions=[
                        'kms:GenerateDataKey',
                        'kms:Decrypt',
                    ],
                    effect=aws_iam.Effect.ALLOW,
                    resources=[kms_key.key_arn],
                )
            )

        self.rotation_lambda_role = aws_iam.Role(
            scope=stack,
            id=self.__prefix + 'LambdaRole',
            role_name=self.__prefix + 'LambdaRole',
            assumed_by=aws_iam.CompositePrincipal(
                aws_iam.ServicePrincipal("lambda.amazonaws.com"),
                aws_iam.ServicePrincipal("secretsmanager.amazonaws.com"),
            ),
            inline_policies={
                self.__prefix + 'LambdaPolicy': aws_iam.PolicyDocument(
                    statements=rotation_lambda_role_statements
                )
            },
        )

        # Create rotation lambda functions source code path.
        dir_path = os.path.dirname(os.path.realpath(__file__))
        path = os.path.join(dir_path, self.LAMBDA_BACKEND_DEPLOYMENT_PACKAGE)

        # Create a lambda function responsible for rds password rotation.
        self.rotation_lambda_function = LambdaFunction(
            scope=stack,
            prefix=self.__prefix,
            description=(
                'A lambda function that is utilized by AWS SecretsManager to rotate a secret after X number of days. '
                'This lambda function connects to a given database and changes its password to whatever password was '
                'provided by AWS SecretsManager.'
            ),
            memory=128,
            timeout=60,
            handler='lambda_function.lambda_handler',
            runtime=Runtime.PYTHON_2_7,
            role=self.rotation_lambda_role,
            env={
                'SECRETS_MANAGER_ENDPOINT': f'https://secretsmanager.{stack.region}.amazonaws.com',
                'INITIAL_DATABASE_PASSWORD': database.master_user_password
            },
            security_groups=vpc_parameters.rotation_lambda_security_groups,
            subnets=vpc_parameters.rotation_lambda_subnets,
            vpc=vpc_parameters.rotation_lambda_vpc,
            source_code=Code.from_asset(path=path)
        ).lambda_function
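For context, Secrets Manager calls a rotation function four times per rotation, with event['Step'] set to createSecret, setSecret, testSecret and finishSecret in turn. A heavily simplified skeleton of what the lambda_function.lambda_handler in the deployment package has to dispatch on; the actual password-change logic is omitted:

# Hypothetical, heavily simplified rotation handler skeleton.
def lambda_handler(event, context):
    step = event['Step']
    secret_arn = event['SecretId']
    token = event['ClientRequestToken']

    if step == 'createSecret':
        pass  # generate and stage a new secret version (AWSPENDING)
    elif step == 'setSecret':
        pass  # apply the staged password to the database
    elif step == 'testSecret':
        pass  # verify that the new credentials actually work
    elif step == 'finishSecret':
        pass  # move the AWSCURRENT stage to the new version
    else:
        raise ValueError(f'Unknown rotation step: {step}')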
Example #26
    def __init__(self,
                 scope: Construct,
                 id: str,
                 elasticsearch_index: ElasticsearchIndexResource,
                 dynamodb_table: Table,
                 kms_key: Optional[Key] = None,
                 *,
                 sagemaker_endpoint_name: str = None,
                 sagemaker_endpoint_arn: str = None,
                 sagemaker_embeddings_key: str = None) -> None:
        super().__init__(scope=scope, id=id)

        elasticsearch_layer = BElasticsearchLayer(
            scope=self, name=f"{id}ElasticsearchLayer")

        if bool(sagemaker_endpoint_name) ^ bool(sagemaker_embeddings_key):
            raise ValueError(
                'In order to use sentence embeddings, both of the following environment '
                'variables are required: SAGEMAKER_ENDPOINT_NAME and SAGEMAKER_EMBEDDINGS_KEY. '
                'Otherwise, provide neither of them.')

        if sagemaker_endpoint_name and not sagemaker_endpoint_arn:
            sagemaker_endpoint_arn = self.__resolve_sagemaker_endpoints_arn(
                '*')

        optional_sagemaker_parameters = {
            'SAGEMAKER_ENDPOINT_NAME': sagemaker_endpoint_name or None,
            'SAGEMAKER_EMBEDDINGS_KEY': sagemaker_embeddings_key or None
        }

        initial_cloner_function = SingletonFunction(
            scope=self,
            id='InitialClonerFunction',
            uuid='e01116a4-f939-43f2-8f5b-cc9f862c9e01',
            lambda_purpose='InitialClonerSingletonLambda',
            code=Code.from_asset(initial_cloner_root),
            handler='index.handler',
            runtime=Runtime.PYTHON_3_8,
            layers=[elasticsearch_layer],
            log_retention=RetentionDays.ONE_MONTH,
            memory_size=128,
            timeout=Duration.minutes(15),
            role=Role(
                scope=self,
                id='InitialClonerFunctionRole',
                assumed_by=ServicePrincipal('lambda.amazonaws.com'),
                inline_policies={
                    'LogsPolicy':
                    PolicyDocument(statements=[
                        PolicyStatement(
                            actions=[
                                'logs:CreateLogGroup',
                                'logs:CreateLogStream',
                                'logs:PutLogEvents',
                                'logs:DescribeLogStreams',
                            ],
                            resources=['arn:aws:logs:*:*:*'],
                            effect=Effect.ALLOW,
                        )
                    ]),
                    'ElasticsearchPolicy':
                    PolicyDocument(statements=[
                        PolicyStatement(
                            actions=[
                                'es:ESHttpDelete',
                                'es:ESHttpGet',
                                'es:ESHttpHead',
                                'es:ESHttpPatch',
                                'es:ESHttpPost',
                                'es:ESHttpPut',
                            ],
                            resources=['*'],
                            effect=Effect.ALLOW,
                        )
                    ]),
                    'DynamodbPolicy':
                    PolicyDocument(statements=[
                        PolicyStatement(
                            actions=['dynamodb:*'],
                            resources=['*'],
                            effect=Effect.ALLOW,
                        )
                    ]),
                },
                description='Role for DynamoDB Initial Cloner Function',
            ),
        )

        if kms_key:
            initial_cloner_function.add_to_role_policy(
                PolicyStatement(
                    actions=['kms:Decrypt'],
                    resources=[kms_key.key_arn],
                    effect=Effect.ALLOW,
                ), )

        initial_cloner = CustomResource(
            scope=self,
            id='InitialCloner',
            service_token=initial_cloner_function.function_arn,
            removal_policy=RemovalPolicy.DESTROY,
            properties={
                'DynamodbTableName':
                dynamodb_table.table_name,
                'ElasticsearchIndexName':
                elasticsearch_index.index_name,
                'ElasticsearchEndpoint':
                elasticsearch_index.elasticsearch_domain.domain_endpoint,
            },
            resource_type='Custom::ElasticsearchInitialCloner',
        )

        primary_key_field = initial_cloner.get_att_string('PrimaryKeyField')

        dynamodb_stream_arn = dynamodb_table.table_stream_arn
        if not dynamodb_stream_arn:
            raise Exception('DynamoDB streams must be enabled for the table')

        dynamodb_event_source = DynamoEventSource(
            table=dynamodb_table,
            starting_position=StartingPosition.LATEST,
            enabled=True,
            max_batching_window=Duration.seconds(10),
            bisect_batch_on_error=True,
            parallelization_factor=2,
            batch_size=1000,
            retry_attempts=10,
        )

        cloner_inline_policies = {
            'LogsPolicy':
            PolicyDocument(statements=[
                PolicyStatement(
                    actions=[
                        'logs:CreateLogGroup',
                        'logs:CreateLogStream',
                        'logs:PutLogEvents',
                        'logs:DescribeLogStreams',
                    ],
                    resources=['arn:aws:logs:*:*:*'],
                    effect=Effect.ALLOW,
                )
            ]),
            'ElasticsearchPolicy':
            PolicyDocument(statements=[
                PolicyStatement(
                    actions=[
                        'es:ESHttpDelete',
                        'es:ESHttpGet',
                        'es:ESHttpHead',
                        'es:ESHttpPatch',
                        'es:ESHttpPost',
                        'es:ESHttpPut',
                    ],
                    resources=[
                        f'{elasticsearch_index.elasticsearch_domain.domain_arn}/*'
                    ],
                    effect=Effect.ALLOW,
                )
            ]),
            'DynamodbStreamsPolicy':
            PolicyDocument(statements=[
                PolicyStatement(
                    actions=[
                        'dynamodb:DescribeStream',
                        'dynamodb:GetRecords',
                        'dynamodb:GetShardIterator',
                        'dynamodb:ListStreams',
                    ],
                    resources=[dynamodb_stream_arn],
                    effect=Effect.ALLOW,
                )
            ]),
        }

        if sagemaker_endpoint_arn:
            cloner_inline_policies['SagemakerPolicy'] = PolicyDocument(
                statements=[
                    PolicyStatement(actions=['sagemaker:InvokeEndpoint'],
                                    resources=[sagemaker_endpoint_arn],
                                    effect=Effect.ALLOW)
                ])

        cloner_function = Function(
            scope=self,
            id='ClonerFunction',
            code=Code.from_asset(cloner_root),
            handler='index.handler',
            runtime=Runtime.PYTHON_3_8,
            environment={
                'ES_INDEX_NAME': elasticsearch_index.index_name,
                'ES_DOMAIN_ENDPOINT':
                elasticsearch_index.elasticsearch_domain.domain_endpoint,
                'PRIMARY_KEY_FIELD': primary_key_field,
                **{
                    k: optional_sagemaker_parameters[k]
                    for k in optional_sagemaker_parameters if all(optional_sagemaker_parameters.values(
                    ))
                }
            },
            events=[dynamodb_event_source],
            layers=[elasticsearch_layer],
            log_retention=RetentionDays.ONE_MONTH,
            memory_size=128,
            role=Role(
                scope=self,
                id='ClonerFunctionRole',
                assumed_by=ServicePrincipal('lambda.amazonaws.com'),
                inline_policies=cloner_inline_policies,
                description='Role for DynamoDB Cloner Function',
            ),
            timeout=Duration.seconds(30),
        )

        if kms_key:
            cloner_function.add_to_role_policy(
                PolicyStatement(
                    actions=['kms:Decrypt'],
                    resources=[kms_key.key_arn],
                    effect=Effect.ALLOW,
                ))
Example #27
    def __init__(self,
                 scope: Stack,
                 id: str,
                 capacity: Optional[AddCapacityOptions] = None,
                 cluster_name: Optional[str] = None,
                 container_insights: Optional[bool] = None,
                 default_cloud_map_namespace: Optional[
                     CloudMapNamespaceOptions] = None,
                 vpc: Optional[IVpc] = None,
                 **kwargs) -> None:
        known_args = dict(
            scope=scope,
            id=id,
            capacity=capacity,
            cluster_name=cluster_name,
            container_insights=container_insights,
            default_cloud_map_namespace=default_cloud_map_namespace,
            vpc=vpc)

        unknown_args = kwargs

        super().__init__(**{**known_args, **unknown_args})

        self.__role = Role(
            scope=scope,
            id=cluster_name + 'CustomResourceRole',
            role_name=cluster_name + 'CustomResourceRole',
            assumed_by=CompositePrincipal(
                ServicePrincipal("lambda.amazonaws.com"),
                ServicePrincipal("cloudformation.amazonaws.com")),
            inline_policies={
                cluster_name + 'CustomResourcePolicy':
                PolicyDocument(statements=[
                    PolicyStatement(actions=[
                        "ecs:ListClusters",
                        "ecs:ListContainerInstances",
                        "ecs:ListServices",
                        "ecs:ListTaskDefinitions",
                        "ecs:ListTasks",
                        "ecs:DescribeClusters",
                        "ecs:DescribeContainerInstances",
                        "ecs:DescribeServices",
                        "ecs:DescribeTaskDefinition",
                        "ecs:DescribeTasks",
                        "ecs:CreateCluster",
                        "ecs:DeleteCluster",
                        "ecs:DeleteService",
                        "ecs:DeregisterContainerInstance",
                        "ecs:DeregisterTaskDefinition",
                        "ecs:StopTask",
                        "ecs:UpdateService",
                    ],
                                    effect=Effect.ALLOW,
                                    resources=['*']),
                    PolicyStatement(actions=[
                        "logs:CreateLogGroup", "logs:CreateLogStream",
                        "logs:PutLogEvents"
                    ],
                                    effect=Effect.ALLOW,
                                    resources=['*']),
                ])
            },
            managed_policies=[])

        self.__custom_backend = Function(
            scope=scope,
            id=cluster_name + 'Deleter',
            code=Code.from_asset(path=package_root),
            handler='index.handler',
            runtime=Runtime.PYTHON_3_6,
            description=
            f'A custom resource backend to delete ecs cluster ({cluster_name}) in the right way.',
            function_name=cluster_name + 'Deleter',
            memory_size=128,
            role=self.__role,
            timeout=Duration.seconds(900),
        )

        # noinspection PyTypeChecker
        provider: ICustomResourceProvider = CustomResourceProvider.from_lambda(
            self.__custom_backend)

        self.__custom_resource = CustomResource(
            scope=scope,
            id=cluster_name + 'CustomResource',
            provider=provider,
            removal_policy=RemovalPolicy.DESTROY,
            properties={'clusterName': cluster_name},
            resource_type='Custom::EmptyS3Bucket')

        # Make sure that custom resource is deleted before lambda function backend.
        self.__custom_resource.node.add_dependency(self.__custom_backend)
        # Make sure that custom resource is deleted before the bucket.
        self.__custom_resource.node.add_dependency(self)
Example #28
    def __init__(
            self,
            scope: Stack,
            id: str,
            on_create_action: Dict[str, Any],
            on_update_action: Dict[str, Any],
            on_delete_action: Dict[str, Any],
    ) -> None:
        """
        Constructor.

        :param scope: CloudFormation stack in which resources should be placed.
        :param id: Name (id) or prefix for resources.
        :param on_create_action: Create action arguments. Read more on:
        https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.create_service
        :param on_update_action: Update action arguments. Read more on:
        https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.update_service
        :param on_delete_action: Delete action arguments. Read more on:
        https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.delete_service
        """
        self.__role = Role(
            scope=scope,
            id=id + 'Role',
            role_name=id + 'Role',
            assumed_by=CompositePrincipal(
                ServicePrincipal("lambda.amazonaws.com"),
                ServicePrincipal("cloudformation.amazonaws.com")
            ),
            inline_policies={
                id + 'Policy': PolicyDocument(
                    statements=[
                        PolicyStatement(
                            actions=[
                                'ecs:createService',
                                'ecs:updateService',
                                'ecs:deleteService',
                                'ecs:describeServices',
                                'ecs:listServices',
                                'ecs:updateServicePrimaryTaskSet'
                            ],
                            effect=Effect.ALLOW,
                            resources=['*']
                        ),
                        PolicyStatement(
                            actions=[
                                "logs:CreateLogGroup",
                                "logs:CreateLogStream",
                                "logs:PutLogEvents"
                            ],
                            effect=Effect.ALLOW,
                            resources=['*']
                        ),
                    ]
                )
            },
            managed_policies=[]
        )

        self.__custom_backend = Function(
            scope=scope,
            id=id + 'Backend',
            code=Code.from_asset(
                path=package_root
            ),
            handler='index.handler',
            runtime=Runtime.PYTHON_3_6,
            description=f'A custom resource backend to manage ecs {id} service.',
            function_name=id + 'Backend',
            memory_size=128,
            role=self.__role,
            timeout=Duration.seconds(900),
        )

        # noinspection PyTypeChecker
        provider: ICustomResourceProvider = CustomResourceProvider.from_lambda(self.__custom_backend)

        self.__custom_resource = CustomResource(
            scope=scope,
            id=id + 'CustomResource',
            provider=provider,
            removal_policy=RemovalPolicy.DESTROY,
            properties={
                'onCreate': on_create_action,
                'onUpdate': on_update_action,
                'onDelete': on_delete_action
            },
            resource_type='Custom::EcsService'
        )

        # Make sure that custom resource is deleted before lambda function backend.
        self.__custom_resource.node.add_dependency(self.__custom_backend)
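The backend function receives a standard CloudFormation custom-resource event, so index.handler has to dispatch on event['RequestType'] and pick the matching action from the properties passed above. A simplified, hypothetical sketch; signalling success or failure back to CloudFormation via event['ResponseURL'] is deliberately omitted:

# Hypothetical, simplified sketch of the index.handler backend.
import boto3

ecs = boto3.client('ecs')


def handler(event, context):
    props = event['ResourceProperties']
    request_type = event['RequestType']  # 'Create', 'Update' or 'Delete'

    if request_type == 'Create':
        ecs.create_service(**props['onCreate'])
    elif request_type == 'Update':
        ecs.update_service(**props['onUpdate'])
    elif request_type == 'Delete':
        ecs.delete_service(**props['onDelete'])
    # A real backend must also send a response to CloudFormation (event['ResponseURL']).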
Example #29
    def __init__(
        self,
        scope: Construct,
        id: str,
        *,
        vpc: IVpc,
        cluster: ICluster,
        service: IEc2Service,
        ecs_security_group: SecurityGroup,
        deployment: Deployment,
        **kwargs,
    ) -> None:
        super().__init__(scope, id, **kwargs)

        Tags.of(self).add("Application", self.application_name)
        Tags.of(self).add("Deployment", deployment.value)

        security_group = SecurityGroup(
            self,
            "LambdaSG",
            vpc=vpc,
        )

        lambda_func = Function(
            self,
            "ReloadLambda",
            code=Code.from_asset("./lambdas/bananas-reload"),
            handler="index.lambda_handler",
            runtime=Runtime.PYTHON_3_8,
            timeout=Duration.seconds(120),
            environment={
                "CLUSTER": cluster.cluster_arn,
                "SERVICE": service.service_arn,
            },
            vpc=vpc,
            security_groups=[security_group, ecs_security_group],
            reserved_concurrent_executions=1,
        )
        lambda_func.add_to_role_policy(
            PolicyStatement(
                actions=[
                    "ec2:DescribeInstances",
                    "ecs:DescribeContainerInstances",
                    "ecs:DescribeTasks",
                    "ecs:ListContainerInstances",
                    "ecs:ListServices",
                    "ecs:ListTagsForResource",
                    "ecs:ListTasks",
                ],
                resources=[
                    "*",
                ],
            )
        )

        policy = ManagedPolicy(self, "Policy")
        policy.add_statements(
            PolicyStatement(
                actions=[
                    "lambda:InvokeFunction",
                ],
                resources=[lambda_func.function_arn],
            )
        )
Example #30
 def __code(self) -> Code:
     from .source import root
     return Code.from_asset(root)
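Taken together, the common thread in all of the examples above is Code.from_asset pointing at a local directory or zip archive that CDK packages and uploads as an asset. A minimal end-to-end sketch (CDK v1 style, all names and paths are placeholders):

# Minimal end-to-end sketch: one stack, one asset-backed function.
from aws_cdk import core
from aws_cdk.aws_lambda import Code, Function, Runtime


class MinimalStack(core.Stack):
    def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        Function(
            self,
            'HelloFunction',
            runtime=Runtime.PYTHON_3_8,
            handler='index.handler',            # ./lambda/index.py must define handler(event, context)
            code=Code.from_asset('./lambda'),   # the directory is zipped and uploaded as an asset
        )


app = core.App()
MinimalStack(app, 'MinimalStack')
app.synth()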