Example No. 1
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here
        handler = _lambda.Function(
            self,
            "demo_func",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="demo_func.handler",
            timeout=Duration.minutes(1),  # pylint: disable=E1120
            code=_lambda.Code.asset("lambda_code/demo_func"),  # pylint: disable=E1120
        )

        api_gw = _apigw.RestApi(self,
                                "ApiGatewayForSlack",
                                rest_api_name="gw_for_slack")

        exam_entity = api_gw.root.add_resource("test")
        exam_entity_lambda_integration = _apigw.LambdaIntegration(
            handler,
            proxy=False,
            integration_responses=[{
                "statusCode": "200"
            }],
        )
        exam_entity.add_method(
            "GET",
            exam_entity_lambda_integration,
            method_responses=[{
                "statusCode": "200"
            }],
        )
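A minimal sketch of how a stack like this might be wired into a CDK app entry point; the module and class names below (demo_stack, ApiGatewayLambdaStack) are assumptions for illustration, not taken from the example:

# app.py - hypothetical entry point (CDK v1, matching the example's core/_lambda/_apigw aliases)
from aws_cdk import core

from demo_stack import ApiGatewayLambdaStack  # assumed module and class name

app = core.App()
ApiGatewayLambdaStack(app, "ApiGatewayLambdaStack")
app.synth()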
Example No. 2
 def __init__(self, scope: Stack):
     super().__init__(
         scope=scope,
         id=f'{TestingStack.global_prefix()}FunctionWithUnitTests',
         code=Code.from_asset(root),
         handler='handler.handler',
         runtime=Runtime.PYTHON_3_8,
         timeout=Duration.minutes(5),
         memory_size=512,
         layers=[
             Layer(
                 scope=scope,
                 name=
                 f'{TestingStack.global_prefix()}TestingLayerWithUnitTests',
                 dependencies={
                     # These dependencies are required for running unit tests inside lambda functions.
                     # Pytest is used for running actual unit tests.
                     'pytest':
                     PackageVersion.from_string_version('6.2.5'),
                     # Pook is used for HTTP mocking, therefore it is also needed here.
                     'pook':
                     PackageVersion.from_string_version('1.0.1'),
                      # Not sure about this dependency. The Lambda runtime throws errors if it's missing.
                     'aws-cdk.core':
                     PackageVersion.from_string_version('1.99.0'),
                      # This dependency should be installed automatically with 'pook', since 'pook' depends on 'jsonschema', which in turn depends on it.
                      # For some reason it isn't, and tests would fail with an import error otherwise.
                     'importlib-resources':
                     PackageVersion.from_string_version('5.4.0')
                 })
         ])
Example No. 3
    def __init__(
            self,
            scope: Stack,
            name: str,
            twilio_account_sid: str,
            twilio_auth_token: str,
            twilio_workspace_sid: str
    ) -> None:
        self.__name = name

        super().__init__(
            scope=scope,
            id=name,
            uuid=f'{name}-uuid',
            function_name=name,
            code=self.__code(),
            layers=[TwilioLayer(scope, f'TwilioLayerFor{name}')],
            timeout=Duration.minutes(1),
            handler='index.handler',
            runtime=Runtime.PYTHON_3_8,
            environment={
                'TWILIO_ACCOUNT_SID': twilio_account_sid,
                'TWILIO_AUTH_TOKEN': twilio_auth_token,
                'TWILIO_WORKSPACE_SID': twilio_workspace_sid
            }
        )
Example No. 4
    def __init__(self, scope: Stack, name: str) -> None:
        """
        Constructor.

        :param scope: CloudFormation stack in which this function will be deployed.
        :param name: The name of the function.
        """
        self.__name = name

        super().__init__(
            scope=scope,
            id=name,
            uuid=f'{name}-uuid',
            function_name=name,
            code=self.__code(),
            handler='index.handler',
            runtime=Runtime.PYTHON_3_8,
            timeout=Duration.minutes(1),
        )

        # Add permission to create deployments. Since this is a singleton lambda function,
        # we can not specify a specific api gateway resource.
        self.add_to_role_policy(
            PolicyStatement(actions=['apigateway:POST', 'apigateway:PATCH'],
                            resources=['*']))
Example No. 5
 def new_codebuild_task(
         self, project: _codebuild.Project) -> _tasks.CodeBuildStartBuild:
     return _tasks.CodeBuildStartBuild(
         self,
         f"{project}task",
         project=project,
         timeout=Duration.minutes(10),
         integration_pattern=_step_fn.IntegrationPattern.RUN_JOB)
Example No. 6
    def __init__(self, app: core.App, id: str, props, **kwargs) -> None:
        super().__init__(app, id, **kwargs)
        
        handler = aws_lambda.Function(
            self, "UrlShortenerFunction",
            code=aws_lambda.Code.asset("./lambda"),
            handler="lambda_function.lambda_handler",
            timeout=Duration.minutes(5),
            runtime=aws_lambda.Runtime.PYTHON_3_7)

        # define the API endpoint and associate the handler
        api = apig.LambdaRestApi(
            self, "IoTPgControlPlaneApi",
            handler=handler)
Example No. 7
    def __init__(self, scope: Construct, id: str, functions: LambdaLib, **kwargs) -> None:
        super().__init__(scope, id)

        # Step Function
        submit_job = tasks.LambdaInvoke(self, "Submit Job",
            lambda_function=functions.send_email_approval,
            payload=sfn.TaskInput.from_object({'ExecutionContext.$': '$$'}),
            result_path=sfn.JsonPath.DISCARD
        )

        wait_x = sfn.Wait(self, "Wait",
            time= sfn.WaitTime.duration(Duration.minutes(2))
        )

        get_status = tasks.LambdaInvoke(self, "Get Job Status",
            lambda_function=functions.check_status_dynamo,
            payload=sfn.TaskInput.from_object({'ExecutionContext.$': '$$'}),
            result_path="$.status"
        )

        restrict_es = tasks.LambdaInvoke(self, "Restric ES Policy",
            lambda_function=functions.restric_es_policy,
            payload=sfn.TaskInput.from_object({'ExecutionContext.$': '$$'}),
        )

        restrict_rds = tasks.LambdaInvoke(self, "Restric RDS",
            lambda_function=functions.restric_rds_policy,
            payload=sfn.TaskInput.from_object({'ExecutionContext.$': '$$'}),
        )

        restrict_es_condition = sfn.Condition.string_equals("$.detail.additionalEventData.configRuleName", constants.CONFIG_RULE_ES_PUBLIC)
        restrict_rds_condition = sfn.Condition.string_equals("$.detail.additionalEventData.configRuleName", constants.CONFIG_RULE_RDS_PUBLIC)

        definition = (submit_job.next(wait_x)
                                .next(get_status)
                                .next(sfn.Choice(self, "Job Complete?")
                                .when(sfn.Condition.string_equals("$.status.Payload.status", "Rejected!"), wait_x)
                                # .when(sfn.Condition.string_equals("$.status.Payload.status", "NON_COMPLIANT"), final_task)
                                # .when(sfn.Condition.string_equals("$.status.Payload.status", "Accepted!"), final_task))
                                .otherwise(sfn.Choice(self, "Remediation Choice")
                                .when(restrict_es_condition, restrict_es)
                                .when(restrict_rds_condition, restrict_rds)))
                                )


        self.state_machine = sfn.StateMachine(self, "StateMachine",
            definition=definition,
            timeout=Duration.hours(2)
        )
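The "Job Complete?" choice reads $.status.Payload.status, so the status-checking Lambda is expected to return an object with a status key; a minimal hypothetical sketch of that handler's return shape (the real check_status_dynamo code is not shown in the example):

# Hypothetical body for functions.check_status_dynamo - illustrative only.
def handler(event, context):
    # ... look up the approval decision in DynamoDB (not shown) ...
    return {"status": "Rejected!"}  # "Rejected!" loops the state machine back to the Wait state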
Example No. 8
    def __init__(self, scope: Construct, id: str, **kwarg) -> None:
        super().__init__(scope, id, **kwarg)

        # define the table that maps short codes to URLs.
        table = aws_dynamodb.Table(self, "Table",
                                   partition_key=aws_dynamodb.Attribute(
                                       name="id",
                                       type=aws_dynamodb.AttributeType.STRING),
                                   read_capacity=10,
                                   write_capacity=5)

        # define the API gateway request handler. all API requests will go to the same function.
        handler = aws_lambda.Function(self, "UrlShortenerFunction",
                                      code=aws_lambda.Code.asset("./lambda"),
                                      handler="handler.main",
                                      timeout=Duration.minutes(5),
                                      runtime=aws_lambda.Runtime.PYTHON_3_7)

        # generate the topic to publish to
        topic = aws_sns.Topic(self, "Topic", display_name="Url created topic")
        topic.add_subscription(aws_sns_subscriptions.EmailSubscription("*****@*****.**"))

        # pass the table name to the handler through an environment variable and grant
        # the handler read/write permissions on the table.
        handler.add_environment('TABLE_NAME', table.table_name)
        handler.add_environment('TOPIC_ARN', topic.topic_arn)
        table.grant_read_write_data(handler)
        topic.grant_publish(handler)

        # define the API endpoint and associate the handler
        api = aws_apigateway.LambdaRestApi(self, "UrlShortenerApi", handler=handler)

        # define the static website hosting
        frontendBucket = aws_s3.Bucket(self, "UrlShortenerWebsiteBucket",
                                       public_read_access=True,
                                       removal_policy=core.RemovalPolicy.DESTROY,
                                       website_index_document="index.html")

        deployment = aws_s3_deployment.BucketDeployment(self, "deployStaticWebsite",
                                                        sources=[aws_s3_deployment.Source.asset("./frontend")],
                                                        destination_bucket=frontendBucket)

        # define a Watchful monitoring system and watch the entire scope
        # this will automatically find all watchable resources and add
        # them to our dashboard
        wf = Watchful(self, 'watchful', alarm_email='*****@*****.**')
        wf.watch_scope(self)
Example No. 9
 def new_build_project(self, repo: codecommit.Repository,
                       buildspec_path: str,
                       proj_name: str) -> _codebuild.Project:
     return _codebuild.Project(
         self,
         proj_name,
         badge=True,
         source=_codebuild.Source.code_commit(repository=repo),
         description=f"Build project for {proj_name}",
         environment=_codebuild.BuildEnvironment(
             build_image=_codebuild.LinuxBuildImage.STANDARD_5_0,
             compute_type=_codebuild.ComputeType.LARGE,
             privileged=True),
         project_name=proj_name,
         build_spec=_codebuild.BuildSpec.from_source_filename(
             filename=buildspec_path),
         timeout=Duration.minutes(10))
Example No. 10
class Config:
    # Data bucket settings
    data_bucket_name = 'epg-data-s3-bucket-42'
    data_bucket_noncurrent_version_expiration = Duration.days(30)

    # Out bucket settings
    out_bucket_name = 'epg-out-s3-bucket-42'

    # Notifications
    email_recipient = '*****@*****.**'

    # Update function
    update_function_rate = Duration.minutes(5)
    error_count_to_notify = 12

    @staticmethod
    def period_to_check_error_count() -> Duration:
        return Duration.minutes(Config.update_function_rate.to_minutes() *
                                Config.error_count_to_notify * 2)
Example No. 11
    def functions_for(
        self,
        name,
        base,
        handlers,
        libs=None,
        timeout=Duration.minutes(5),
        runtime=Runtime.PYTHON_3_8,
        layers=None,
    ) -> Dict[str, Function]:
        if isinstance(handlers, str):
            handlers = [handlers]
        if not isinstance(handlers, list):
            raise ValueError("handlers must be a string or a list of handlers")
        if isinstance(libs, str):
            libs = [libs]
        if isinstance(layers, str):
            layers = [layers]
        if libs and not isinstance(libs, list):
            raise ValueError("libs must be a string or a list of libraries")

        bundling = self._get_bundling(base, libs=libs)
        code = Code.from_asset(str(self.source_path), bundling=bundling)
        role = self.build_lambda_role(name)
        functions = {}
        for handler in handlers:
            func_name = name + handler.split(".")[0].replace(
                "_", " ").title().replace(" ", "").replace("Handler", "")
            functions.update({
                func_name:
                Function(
                    self,
                    func_name,
                    handler=handler,
                    code=code,
                    runtime=runtime,
                    timeout=timeout,
                    role=role,
                    layers=layers,
                    environment={"LOG_LEVEL": self.log_level},
                )
            })
        return functions
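A hedged usage sketch of functions_for; the name, base directory, handler modules, and library below are placeholders. With these arguments the returned dictionary would be keyed "IngestApi" and "IngestWorker", because each handler's module name is title-cased, stripped of underscores and any trailing "Handler", and appended to name:

# Hypothetical call - the arguments are placeholders, not taken from the example.
functions = self.functions_for(
    name="Ingest",
    base="ingest",
    handlers=["api.handler", "worker.handler"],
    libs=["requests"],
    timeout=Duration.minutes(5),
)
ingest_api = functions["IngestApi"]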
Example No. 12
    def __init__(
        self,
        scope: Stack,
        name: str,
    ) -> None:
        self.__scope = scope
        self.__name = name

        super().__init__(
            scope=scope,
            id=name,
            uuid=f'{name}-uuid',
            function_name=name,
            code=self.__code(),
            handler='index.handler',
            runtime=Runtime.PYTHON_3_8,
            layers=[ElasticsearchLayer(scope, f'{name}ElasticsearchLayer')],
            environment={},
            timeout=Duration.minutes(10),
            role=self.role(),
        )
Example No. 13
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        repo = codecommit.Repository(
            self,
            "repo",
            repository_name="demorepo",
            description="Repo to test PR with stepfunctions")

        proj1 = self.new_build_project(repo, "pr_specs/buildspec.yaml",
                                       "proj1")

        proj2 = _codebuild.Project(
            self,
            "proj_name",
            badge=True,
            description="Build project for ",
            environment=_codebuild.BuildEnvironment(
                build_image=_codebuild.LinuxBuildImage.STANDARD_5_0,
                compute_type=_codebuild.ComputeType.LARGE,
                privileged=True),
            project_name="proj_name",
            build_spec=_codebuild.BuildSpec.from_source_filename(
                filename="pr_specs/buildspec2.yaml"),
            timeout=Duration.minutes(10),
        )

        input_task = _step_fn.Pass(self, "passstate")

        proj1_tasks = self.new_codebuild_task(proj1)
        proj2_tasks = self.new_codebuild_task(proj2)

        definition = input_task.next(proj1_tasks).next(proj2_tasks)

        _fn = _step_fn.StateMachine(
            self,
            "statemachine",
            definition=definition,
            state_machine_name="statemachine",
        )
Example No. 14
    def __init__(
            self,
            app: App,
            id: str,
            app_name: str,
            metrics: list) -> None:
        super().__init__(app, id)
        for metric in metrics:
            operation = metric[0]
            metric_type = metric[1]
            color = metric[3]

            dim = {
                'By App Version': os.environ['APP_VERSION'],
                'By Operation': operation,
                'By Stage': stage}

            Metric(metric_name=get_metric_name(metric=metric),
                   namespace=app_name,
                   color=color,
                   dimensions=dim,
                   period=Duration.minutes(1),
                   unit=Unit.MILLISECONDS)
Example No. 15
    def __init__(self, scope: Construct, id: str, **kwarg) -> None:
        super().__init__(scope, id, **kwarg)

        # define the table that maps short codes to URLs.
        table = aws_dynamodb.Table(self, "Table",
                                   partition_key=aws_dynamodb.Attribute(
                                       name="id",
                                       type=aws_dynamodb.AttributeType.STRING),
                                   read_capacity=10,
                                   write_capacity=5)

        # define the API gateway request handler. all API requests will go to the same function.
        handler = aws_lambda.Function(self, "UrlShortenerFunction",
                                      code=aws_lambda.Code.asset("./lambda"),
                                      handler="handler.main",
                                      timeout=Duration.minutes(5),
                                      runtime=aws_lambda.Runtime.PYTHON_3_7)

        # pass the table name to the handler through an environment variable and grant
        # the handler read/write permissions on the table.
        handler.add_environment('TABLE_NAME', table.table_name)
        table.grant_read_write_data(handler)

        # define the API endpoint and associate the handler
        api = aws_apigateway.LambdaRestApi(self, "UrlShortenerApi",
                                           handler=handler)

        # map go.waltersco.co to this api gateway endpoint
        # the domain name is a shared resource that can be accessed through the API in WaltersCoStack
        # NOTE: you can comment this out if you want to bypass the domain name mapping
        self.map_waltersco_subdomain('go', api)

        # define a Watchful monitoring system and watch the entire scope
        # this will automatically find all watchable resources and add
        # them to our dashboard
        wf = Watchful(self, 'watchful', alarm_email='*****@*****.**')
        wf.watch_scope(self)
Example No. 16
    def __init__(self, scope: core.Construct, construct_id: str,
                 stream: aws_kinesis.IStream, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        sample_device_producer = aws_lambda_python.PythonFunction(
            self,
            'SampleDeviceProducer',
            entry='stacks/sample_kinesis_stream_producer/producer_lambda',
            index='app.py',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            timeout=core.Duration.seconds(30))

        stream.grant_write(sample_device_producer)

        lambda_input = {"Stream": stream.stream_name}
        Rule(self,
             'ProducerTriggerEventRule',
             enabled=True,
             schedule=Schedule.rate(Duration.minutes(1)),
             targets=[
                 aws_events_targets.LambdaFunction(
                     handler=sample_device_producer,
                     event=RuleTargetInput.from_object(lambda_input))
             ])
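The rule above invokes the producer every minute with a static payload of the form {"Stream": "<stream name>"}, so the code under producer_lambda/ (not shown here) presumably reads the stream name from the event; a minimal hypothetical sketch of such a handler:

# stacks/sample_kinesis_stream_producer/producer_lambda/app.py - hypothetical sketch, not the actual producer.
import json

import boto3

kinesis = boto3.client("kinesis")


def handler(event, context):
    # The EventBridge rule passes {"Stream": "<stream name>"} as the event payload.
    kinesis.put_record(
        StreamName=event["Stream"],
        Data=json.dumps({"device_id": "sample-device", "reading": 42}),
        PartitionKey="sample-device",
    )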
Example No. 17
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here
        table = aws_dynamodb.Table(self, "DashboardModel",
            partition_key=aws_dynamodb.Attribute(name="Pk", type=aws_dynamodb.AttributeType.STRING),
            sort_key=aws_dynamodb.Attribute(name="Sk", type=aws_dynamodb.AttributeType.STRING),
            billing_mode=aws_dynamodb.BillingMode.PAY_PER_REQUEST)
        
        kds_input_stream = aws_kinesis.Stream(self, "kds_dashboard_input_stream",
            shard_count=1, 
            stream_name="kds_dashboard_input_stream")
        
        kds_output_stream = aws_kinesis.Stream(self, "kds_dashboard_output_stream",
            shard_count=1, 
            stream_name="kds_dashboard_output_stream")

        # Creating an ingest bucket for this stack
        ingest_bucket = aws_s3.Bucket(self,'dreis_dboard_ingest_bucket')

        kfh_service_role = aws_iam.Role(self, 'KFH_Dashboard_Role',
            assumed_by=aws_iam.ServicePrincipal('firehose.amazonaws.com')
        )

        kfh_policy_stmt = aws_iam.PolicyStatement(
            actions=["*"],
            resources=["*"]
        )

        kfh_service_role.add_to_policy(kfh_policy_stmt)

        # Creating the Kinesis Firehose delivery stream for this stack
        kfh_source = aws_kinesisfirehose.CfnDeliveryStream.KinesisStreamSourceConfigurationProperty(
            kinesis_stream_arn=kds_input_stream.stream_arn,
            role_arn=kfh_service_role.role_arn
        )

        kfh_datalake = aws_kinesisfirehose.CfnDeliveryStream(self, "kfh_datalake",
            s3_destination_configuration=aws_kinesisfirehose.CfnDeliveryStream.S3DestinationConfigurationProperty(
                bucket_arn=ingest_bucket.bucket_arn,
                buffering_hints=aws_kinesisfirehose.CfnDeliveryStream.BufferingHintsProperty(
                    interval_in_seconds=60,
                    size_in_m_bs=5),
                compression_format="UNCOMPRESSED",
                role_arn=kfh_service_role.role_arn
                ),
            delivery_stream_type="KinesisStreamAsSource",
            kinesis_stream_source_configuration=kfh_source
        )

        kda_service_role = aws_iam.Role(self, 'KDA_Dashboard_Role',
            assumed_by=aws_iam.ServicePrincipal('kinesisanalytics.amazonaws.com')
        )

        kda_policy_stmt = aws_iam.PolicyStatement(
            actions=["*"],
            resources=["*"]
        )

        kda_service_role.add_to_policy(kda_policy_stmt)

        # Kinesis Analytics doesn't allow '-' (dash) in column names
        col1 = aws_kinesisanalytics.CfnApplication.RecordColumnProperty(
            name="state",
            sql_type="VARCHAR(2)",
            mapping="$.state"
        )

        col2 = aws_kinesisanalytics.CfnApplication.RecordColumnProperty(
            name="event_time",
            sql_type="TIMESTAMP",
            mapping="$.event-time"
        )
        
        col3 = aws_kinesisanalytics.CfnApplication.RecordColumnProperty(
            name="region",  
            sql_type="VARCHAR(12)",
            mapping="$.region"
        )

        col4 = aws_kinesisanalytics.CfnApplication.RecordColumnProperty(
            name="store_id",
            sql_type="INTEGER",
            mapping="$.store-id"
        )

        col5 = aws_kinesisanalytics.CfnApplication.RecordColumnProperty(
            name="kpi_1",
            sql_type="INTEGER",
            mapping="$.kpi-1"
        )
        
        col6 = aws_kinesisanalytics.CfnApplication.RecordColumnProperty(
            name="kpi_2",
            sql_type="INTEGER",
            mapping="$.kpi-2"
        )

        col7 = aws_kinesisanalytics.CfnApplication.RecordColumnProperty(
            name="kpi_3",
            sql_type="INTEGER",
            mapping="$.kpi-3"
        )

        col8 = aws_kinesisanalytics.CfnApplication.RecordColumnProperty(
            name="kpi_4",
            sql_type="INTEGER",
            mapping="$.kpi-4"
        )

        col9 = aws_kinesisanalytics.CfnApplication.RecordColumnProperty(
            name="kpi_5",
            sql_type="INTEGER",
            mapping="$.kpi-5"
        )

        schema = aws_kinesisanalytics.CfnApplication.InputSchemaProperty(
            record_columns=[col2, col1, col3, col4, col5, col6, col7, col8, col9],
            record_encoding="UTF-8",
            record_format=aws_kinesisanalytics.CfnApplication.RecordFormatProperty(
                record_format_type="JSON",
                mapping_parameters=aws_kinesisanalytics.CfnApplication.MappingParametersProperty(
                    json_mapping_parameters=aws_kinesisanalytics.CfnApplication.JSONMappingParametersProperty(
                        record_row_path="$"
                    )
                )
            )
        )

        kda_is = aws_kinesisanalytics.CfnApplication.KinesisStreamsInputProperty(
            resource_arn=kds_input_stream.stream_arn,
            role_arn=kda_service_role.role_arn
        )

        ip = aws_kinesisanalytics.CfnApplication.InputProperty(
            name_prefix="SOURCE_SQL_STREAM",
            input_schema=schema,
            kinesis_streams_input=kda_is
        )

        # SQL for the KDA application: one pump per destination stream (by store, by state, by region).
        application_code = "CREATE OR REPLACE STREAM \"DESTINATION_SQL_STREAM_BY_STORE\" (\"region\" VARCHAR(10), \"state\" VARCHAR(2), \"store-id\" INTEGER, kpi_1_sum INTEGER,  kpi_2_sum INTEGER, ingest_time TIMESTAMP);" + \
            "CREATE OR REPLACE STREAM \"DESTINATION_SQL_STREAM_BY_STATE\" (\"region\" VARCHAR(10), \"state\" VARCHAR(2), kpi_1_sum INTEGER,  kpi_2_sum INTEGER, ingest_time TIMESTAMP);" + \
            "CREATE OR REPLACE STREAM \"DESTINATION_SQL_STREAM_BY_REGION\" (\"region\" VARCHAR(10), kpi_1_sum INTEGER,  kpi_2_sum INTEGER, ingest_time TIMESTAMP);" + \
            "CREATE OR REPLACE PUMP \"STREAM_PUMP_BY_STORE\" AS INSERT INTO \"DESTINATION_SQL_STREAM_BY_STORE\"" + \
            " SELECT STREAM \"region\", \"state\", \"store-id\", SUM(\"kpi-1\") AS kpi_1_sum, SUM(\"kpi-2\") AS kpi_2_sum, FLOOR(\"SOURCE_SQL_STREAM_001\".APPROXIMATE_ARRIVAL_TIME TO MINUTE) as ingest_time" + \
            " FROM \"SOURCE_SQL_STREAM_001\"" + \
            " GROUP BY \"region\", \"state\", \"store-id\", FLOOR(\"SOURCE_SQL_STREAM_001\".APPROXIMATE_ARRIVAL_TIME TO MINUTE), FLOOR((\"SOURCE_SQL_STREAM_001\".ROWTIME - TIMESTAMP '1970-01-01 00:00:00') SECOND / 10 TO SECOND);" + \
            "CREATE OR REPLACE PUMP \"STREAM_PUMP_BY_STATE\" AS INSERT INTO \"DESTINATION_SQL_STREAM_BY_STATE\"" + \
            " SELECT STREAM \"region\", \"state\", SUM(\"kpi-1\") AS kpi_1_sum, SUM(\"kpi-2\") AS kpi_2_sum, FLOOR(\"SOURCE_SQL_STREAM_001\".APPROXIMATE_ARRIVAL_TIME TO MINUTE) as ingest_time" + \
            " FROM \"SOURCE_SQL_STREAM_001\"" + \
            " GROUP BY \"region\", \"state\", FLOOR(\"SOURCE_SQL_STREAM_001\".APPROXIMATE_ARRIVAL_TIME TO MINUTE), FLOOR((\"SOURCE_SQL_STREAM_001\".ROWTIME - TIMESTAMP '1970-01-01 00:00:00') SECOND / 10 TO SECOND);" + \
            "CREATE OR REPLACE PUMP \"STREAM_PUMP_BY_REGION\" AS INSERT INTO \"DESTINATION_SQL_STREAM_BY_REGION\"" + \
            " SELECT STREAM \"region\", SUM(\"kpi-1\") AS kpi_1_sum, SUM(\"kpi-2\") AS kpi_2_sum, FLOOR(\"SOURCE_SQL_STREAM_001\".APPROXIMATE_ARRIVAL_TIME TO MINUTE) as ingest_time" + \
            " FROM \"SOURCE_SQL_STREAM_001\"" + \
            " GROUP BY \"region\", FLOOR(\"SOURCE_SQL_STREAM_001\".APPROXIMATE_ARRIVAL_TIME TO MINUTE), FLOOR((\"SOURCE_SQL_STREAM_001\".ROWTIME - TIMESTAMP '1970-01-01 00:00:00') SECOND / 10 TO SECOND);"

        kda_app = aws_kinesisanalytics.CfnApplication(self, "kda_agg",
            inputs=[ip], #kda_inputs,
            application_code=application_code, 
            application_description="Aggregating data", 
            application_name="DashboardMetricsAggregator"
        )

        kda_output_prop = aws_kinesisanalytics.CfnApplicationOutput.KinesisStreamsOutputProperty(
            resource_arn=kds_output_stream.stream_arn,
            role_arn=kda_service_role.role_arn
        )

        kda_dest_schema = aws_kinesisanalytics.CfnApplicationOutput.DestinationSchemaProperty(
            record_format_type="JSON"
        )

        kda_output_prop_by_store = aws_kinesisanalytics.CfnApplicationOutput.OutputProperty(
            destination_schema=kda_dest_schema,
            kinesis_streams_output=kda_output_prop,
            name="DESTINATION_SQL_STREAM_BY_STORE"
        )

        kda_output_prop_by_state = aws_kinesisanalytics.CfnApplicationOutput.OutputProperty(
            destination_schema=kda_dest_schema,
            kinesis_streams_output=kda_output_prop,
            name="DESTINATION_SQL_STREAM_BY_STATE"
        )

        kda_output_prop_by_region = aws_kinesisanalytics.CfnApplicationOutput.OutputProperty(
            destination_schema=kda_dest_schema,
            kinesis_streams_output=kda_output_prop,
            name="DESTINATION_SQL_STREAM_BY_REGION"
        )

        kda_app_output_prop = aws_kinesisanalytics.CfnApplicationOutput(self, "kda_agg_output_store",
            application_name="DashboardMetricsAggregator",
            output=kda_output_prop_by_store
        )

        kda_app_output_prop = aws_kinesisanalytics.CfnApplicationOutput(self, "kda_agg_output_state",
            application_name="DashboardMetricsAggregator",
            output=kda_output_prop_by_state
        )

        kda_app_output_prop = aws_kinesisanalytics.CfnApplicationOutput(self, "kda_agg_output_region",
            application_name="DashboardMetricsAggregator",
            output=kda_output_prop_by_region
        )

        lambda_agg_function = aws_lambda.Function(self, "AggDataLambda",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            handler="lambda_function.lambda_handler",
            code=aws_lambda.Code.asset("../models/dashboard/lambdas/aggregate_data_lambda"),
            timeout=Duration.minutes(5))

        lambda_agg_function.add_environment("DDB_TABLE_DASHBOARD", table.table_name)

        lambda_agg_function.add_to_role_policy(aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "kinesis:*"
            ],
            resources=["*"]
        ))

        table.grant_read_write_data(lambda_agg_function)

        kes = aws_lambda_event_sources.KinesisEventSource(kds_output_stream,
            starting_position=aws_lambda.StartingPosition.TRIM_HORIZON,
            batch_size=50, 
            #max_batching_window=100
        )

        lambda_agg_function.add_event_source(kes)

        core.CfnOutput(
            self, "TableName_Dashboard",
            description="Table name for Dashboard",
            value=table.table_name
        )

        core.CfnOutput(
            self, "BucketName_Dashboard",
            description="Bucket name",
            value=ingest_bucket.bucket_arn
        )

        core.CfnOutput(
            self, "KinesisInputStream_Dashboard",
            description="Kinesis input for Dashboard",
            value=kds_input_stream.stream_name
        )

        core.CfnOutput(
            self, "KinesisOutputStream_Dashboard",
            description="Kinesis output for Dashboard",
            value=kds_output_stream.stream_name
        )
        
Example No. 18
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        policy_name: str,
        policy_document: Any,
        timeout: Duration = None
    ) -> None:
        super().__init__(scope, id)

        if type(policy_document) == dict:
            policy_document = json.dumps(policy_document)

        account_id = Stack.of(self).account
        region = Stack.of(self).region

        # IMPORTANT! Setting resources to the exact policy name is the most restrictive we can be, but this will cause issues
        # when trying to update the policy name.
        # See this issue for more info: https://github.com/aws/aws-cdk/issues/14037
        # A possible workaround is setting resources to 'arn:aws:iot:{region}:{account_id}:policy/*', which is more permissive.
        lambda_role = iam.Role(
            scope=self,
            id=f'{id}LambdaRole',
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
            inline_policies={
                "IotPolicyProvisioningPolicy":
                    iam.PolicyDocument(statements=[
                        iam.PolicyStatement(
                            actions=[
                                "iot:ListPolicyVersions", "iot:CreatePolicy", "iot:CreatePolicyVersion", "iot:DeletePolicy",
                                "iot:DeletePolicyVersion", "iot:GetPolicy"
                            ],
                            resources=[f'arn:aws:iot:{region}:{account_id}:policy/{policy_name}'],
                            effect=iam.Effect.ALLOW,
                        )
                    ])
            },
            managed_policies=[iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole")],
        )

        if not timeout:
            timeout = Duration.minutes(5)

        with open(path.join(path.dirname(__file__), 'iot_policy_event_handler.py')) as file:
            code = file.read()

        event_handler = aws_lambda.Function(
            scope=self,
            id=f'{id}EventHandler',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            code=aws_lambda.Code.from_inline(code),
            handler='index.on_event',
            role=lambda_role,
            timeout=timeout,
        )

        with open(path.join(path.dirname(__file__), 'iot_policy_is_complete_handler.py')) as file:
            is_complete_code = file.read()

        is_complete_handler = aws_lambda.Function(
            scope=self,
            id=f'{id}IsCompleteHandler',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            code=aws_lambda.Code.from_inline(is_complete_code),
            handler='index.is_complete',
            role=lambda_role,
        )

        provider = Provider(
            scope=self, 
            id=f'{id}Provider', 
            on_event_handler=event_handler,
            is_complete_handler=is_complete_handler, 
            query_interval=Duration.minutes(2),
        )

        core.CustomResource(
            scope=self,
            id=f'{id}IotPolicy',
            service_token=provider.service_token,
            removal_policy=RemovalPolicy.DESTROY,
            resource_type="Custom::IotPolicyAsync",
            properties={
                "policy_name": policy_name,
                "policy_document": policy_document,
            },
        )
Example No. 19
    def __init__(
        self,
        scope: Construct,
        id: str,
        elasticsearch_index: ElasticsearchIndexResource,
        dynamodb_table: Table,
        kms_key: Optional[Key] = None,
    ) -> None:
        super().__init__(scope=scope, id=id)

        elasticsearch_layer = BElasticsearchLayer(
            scope=self, name=f"{id}ElasticsearchLayer")

        initial_cloner_function = SingletonFunction(
            scope=self,
            id="InitialClonerFunction",
            uuid="e01116a4-f939-43f2-8f5b-cc9f862c9e01",
            lambda_purpose="InitialClonerSingletonLambda",
            code=Code.from_asset(initial_cloner_root),
            handler="index.handler",
            runtime=Runtime.PYTHON_3_8,
            layers=[elasticsearch_layer],
            log_retention=RetentionDays.ONE_MONTH,
            memory_size=128,
            timeout=Duration.minutes(15),
            role=Role(
                scope=self,
                id="InitialClonerFunctionRole",
                assumed_by=ServicePrincipal("lambda.amazonaws.com"),
                inline_policies={
                    "LogsPolicy":
                    PolicyDocument(statements=[
                        PolicyStatement(
                            actions=[
                                "logs:CreateLogGroup",
                                "logs:CreateLogStream",
                                "logs:PutLogEvents",
                                "logs:DescribeLogStreams",
                            ],
                            resources=["arn:aws:logs:*:*:*"],
                            effect=Effect.ALLOW,
                        )
                    ]),
                    "ElasticsearchPolicy":
                    PolicyDocument(statements=[
                        PolicyStatement(
                            actions=[
                                "es:ESHttpDelete",
                                "es:ESHttpGet",
                                "es:ESHttpHead",
                                "es:ESHttpPatch",
                                "es:ESHttpPost",
                                "es:ESHttpPut",
                            ],
                            resources=["*"],
                            effect=Effect.ALLOW,
                        )
                    ]),
                    "DynamodbPolicy":
                    PolicyDocument(statements=[
                        PolicyStatement(
                            actions=["dynamodb:*"],
                            resources=["*"],
                            effect=Effect.ALLOW,
                        )
                    ]),
                },
                description="Role for DynamoDB Initial Cloner Function",
            ),
        )

        if kms_key:
            initial_cloner_function.add_to_role_policy(
                PolicyStatement(
                    actions=["kms:Decrypt"],
                    resources=[kms_key.key_arn],
                    effect=Effect.ALLOW,
                ), )

        initial_cloner = CustomResource(
            scope=self,
            id="InitialCloner",
            service_token=initial_cloner_function.function_arn,
            removal_policy=RemovalPolicy.DESTROY,
            properties={
                "DynamodbTableName":
                dynamodb_table.table_name,
                "ElasticsearchIndexName":
                elasticsearch_index.index_name,
                "ElasticsearchEndpoint":
                elasticsearch_index.elasticsearch_domain.domain_endpoint,
            },
            resource_type="Custom::ElasticsearchInitialCloner",
        )

        primary_key_field = initial_cloner.get_att_string("PrimaryKeyField")

        dynamodb_stream_arn = dynamodb_table.table_stream_arn
        if not dynamodb_stream_arn:
            raise Exception("DynamoDB streams must be enabled for the table")

        dynamodb_event_source = DynamoEventSource(
            table=dynamodb_table,
            starting_position=StartingPosition.LATEST,
            enabled=True,
            max_batching_window=Duration.seconds(10),
            bisect_batch_on_error=True,
            parallelization_factor=2,
            batch_size=1000,
            retry_attempts=10,
        )

        cloner_function = Function(
            scope=self,
            id="ClonerFunction",
            code=Code.from_asset(cloner_root),
            handler="index.handler",
            runtime=Runtime.PYTHON_3_8,
            environment={
                "ES_INDEX_NAME": elasticsearch_index.index_name,
                "ES_DOMAIN_ENDPOINT":
                elasticsearch_index.elasticsearch_domain.domain_endpoint,
                "PRIMARY_KEY_FIELD": primary_key_field,
            },
            events=[dynamodb_event_source],
            layers=[elasticsearch_layer],
            log_retention=RetentionDays.ONE_MONTH,
            memory_size=128,
            role=Role(
                scope=self,
                id="ClonerFunctionRole",
                assumed_by=ServicePrincipal("lambda.amazonaws.com"),
                inline_policies={
                    "LogsPolicy":
                    PolicyDocument(statements=[
                        PolicyStatement(
                            actions=[
                                "logs:CreateLogGroup",
                                "logs:CreateLogStream",
                                "logs:PutLogEvents",
                                "logs:DescribeLogStreams",
                            ],
                            resources=["arn:aws:logs:*:*:*"],
                            effect=Effect.ALLOW,
                        )
                    ]),
                    "ElasticsearchPolicy":
                    PolicyDocument(statements=[
                        PolicyStatement(
                            actions=[
                                "es:ESHttpDelete",
                                "es:ESHttpGet",
                                "es:ESHttpHead",
                                "es:ESHttpPatch",
                                "es:ESHttpPost",
                                "es:ESHttpPut",
                            ],
                            resources=[
                                f"{elasticsearch_index.elasticsearch_domain.domain_arn}/*"
                            ],
                            effect=Effect.ALLOW,
                        )
                    ]),
                    "DynamodbStreamsPolicy":
                    PolicyDocument(statements=[
                        PolicyStatement(
                            actions=[
                                "dynamodb:DescribeStream",
                                "dynamodb:GetRecords",
                                "dynamodb:GetShardIterator",
                                "dynamodb:ListStreams",
                            ],
                            resources=[dynamodb_stream_arn],
                            effect=Effect.ALLOW,
                        )
                    ]),
                },
                description="Role for DynamoDB Cloner Function",
            ),
            timeout=Duration.seconds(30),
        )

        if kms_key:
            cloner_function.add_to_role_policy(
                PolicyStatement(
                    actions=["kms:Decrypt"],
                    resources=[kms_key.key_arn],
                    effect=Effect.ALLOW,
                ), )
Example No. 20
    def __init__(self, scope: Construct, stack_id: str, *, props: SEPStackProps, **kwargs):
        """
        Initialize a new instance of SEPStack
        :param scope: The scope of this construct.
        :param stack_id: The ID of this construct.
        :param props: The properties for this construct.
        :param kwargs: Any kwargs that need to be passed on to the parent class.
        """
        super().__init__(scope, stack_id, **kwargs)

        # The VPC that all components of the render farm will be created in.
        vpc = Vpc(
            self,
            'Vpc',
            max_azs=2,
        )

        recipes = ThinkboxDockerRecipes(
            self,
            'Image',
            stage=Stage.from_directory(props.docker_recipes_stage_path),
        )

        repository = Repository(
            self,
            'Repository',
            vpc=vpc,
            version=recipes.version,
            repository_installation_timeout=Duration.minutes(20),
            # TODO - Evaluate deletion protection for your own needs. These properties are set to RemovalPolicy.DESTROY
            # to cleanly remove everything when this stack is destroyed. If you would like to ensure
            # that these resources are not accidentally deleted, you should set these properties to RemovalPolicy.RETAIN
            # or just remove the removal_policy parameter.
            removal_policy=RepositoryRemovalPolicies(
                database=RemovalPolicy.DESTROY,
                filesystem=RemovalPolicy.DESTROY,
            ),
        )

        host = 'renderqueue'
        zone_name = 'deadline-test.internal'

        # Internal DNS zone for the VPC.
        dns_zone = PrivateHostedZone(
            self,
            'DnsZone',
            vpc=vpc,
            zone_name=zone_name,
        )

        ca_cert = X509CertificatePem(
            self,
            'RootCA',
            subject=DistinguishedName(
                cn='SampleRootCA',
            ),
        )

        server_cert = X509CertificatePem(
            self,
            'RQCert',
            subject=DistinguishedName(
                cn=f'{host}.{dns_zone.zone_name}',
                o='RFDK-Sample',
                ou='RenderQueueExternal',
            ),
            signing_certificate=ca_cert,
        )

        render_queue = RenderQueue(
            self,
            'RenderQueue',
            vpc=vpc,
            version=recipes.version,
            images=recipes.render_queue_images,
            repository=repository,
            # TODO - Evaluate deletion protection for your own needs. This is set to false to
            # cleanly remove everything when this stack is destroyed. If you would like to ensure
            # that this resource is not accidentally deleted, you should set this to true.
            deletion_protection=False,
            hostname=RenderQueueHostNameProps(
                hostname=host,
                zone=dns_zone,
            ),
            traffic_encryption=RenderQueueTrafficEncryptionProps(
                external_tls=RenderQueueExternalTLSProps(
                    rfdk_certificate=server_cert,
                ),
                internal_protocol=ApplicationProtocol.HTTPS,
            ),
        )

        if props.create_resource_tracker_role:
            # Creates the Resource Tracker Access role. This role is required to exist in your account so the resource tracker will work properly
            Role(
                self,
                'ResourceTrackerRole',
                assumed_by=ServicePrincipal('lambda.amazonaws.com'),
                managed_policies= [ManagedPolicy.from_aws_managed_policy_name('AWSThinkboxDeadlineResourceTrackerAccessPolicy')],
                role_name= 'DeadlineResourceTrackerAccessRole',
            )

        fleet = SpotEventPluginFleet(
            self,
            'SpotEventPluginFleet',
            vpc=vpc,
            render_queue=render_queue,
            deadline_groups=['group_name'],
            instance_types=[InstanceType.of(InstanceClass.BURSTABLE3, InstanceSize.LARGE)],
            worker_machine_image=props.worker_machine_image,
            max_capacity=1,
        )

        # Optional: Add additional tags to both spot fleet request and spot instances.
        Tags.of(fleet).add('name', 'SEPtest')

        ConfigureSpotEventPlugin(
            self,
            'ConfigureSpotEventPlugin',
            vpc=vpc,
            render_queue=render_queue,
            spot_fleets=[fleet],
            configuration=SpotEventPluginSettings(
                enable_resource_tracker=True,
            ),
        )
Example No. 21
    def __init__(self, scope: Construct, stack_id: str, *,
                 props: ServiceTierProps, **kwargs):
        """
        Initialize a new instance of ServiceTier
        :param scope: The scope of this construct.
        :param stack_id: The ID of this construct.
        :param props: The properties for this construct.
        :param kwargs: Any kwargs that need to be passed on to the parent class.
        """
        super().__init__(scope, stack_id, **kwargs)

        # Bastion instance for convenience (e.g. SSH into RenderQueue and WorkerFleet instances).
        # Not a critical component of the farm, so this can be safely removed. An alternative way
        # to access your hosts is also provided by the Session Manager, which is also configured
        # later in this example.
        self.bastion = BastionHostLinux(
            self,
            'Bastion',
            vpc=props.vpc,
            subnet_selection=SubnetSelection(
                subnet_group_name=subnets.PUBLIC.name),
            block_devices=[
                BlockDevice(device_name='/dev/xvda',
                            volume=BlockDeviceVolume.ebs(50, encrypted=True))
            ])

        # Mounting the root of the EFS file-system on the bastion for convenient access.
        # This can safely be removed.
        MountableEfs(self, filesystem=props.mountable_file_system.file_system
                     ).mount_to_linux_instance(self.bastion.instance,
                                               location='/mnt/efs')

        self.version = VersionQuery(self,
                                    'Version',
                                    version=props.deadline_version)

        secrets_management_settings = SecretsManagementProps(
            enabled=props.enable_secrets_management)
        if props.enable_secrets_management and props.secrets_management_secret_arn is not None:
            secrets_management_settings[
                "credentials"] = Secret.from_secret_arn(
                    self, 'SMAdminUser', props.secrets_management_secret_arn)

        repository = Repository(
            self,
            'Repository',
            vpc=props.vpc,
            vpc_subnets=SubnetSelection(
                subnet_group_name=subnets.INFRASTRUCTURE.name),
            database=props.database,
            file_system=props.mountable_file_system,
            repository_installation_timeout=Duration.minutes(20),
            repository_installation_prefix='/',
            version=self.version,
            secrets_management_settings=secrets_management_settings)

        images = ThinkboxDockerImages(
            self,
            'Images',
            version=self.version,
            user_aws_thinkbox_eula_acceptance=props.accept_aws_thinkbox_eula)

        server_cert = X509CertificatePem(
            self,
            'RQCert',
            subject=DistinguishedName(
                cn=f'renderqueue.{props.dns_zone.zone_name}',
                o='RFDK-Sample',
                ou='RenderQueueExternal'),
            signing_certificate=props.root_ca)

        self.render_queue = RenderQueue(
            self,
            'RenderQueue',
            vpc=props.vpc,
            vpc_subnets=SubnetSelection(
                subnet_group_name=subnets.INFRASTRUCTURE.name),
            # It is considered good practice to put the Render Queue's load balancer in dedicated subnets because:
            #
            # 1. Deadline Secrets Management identity registration settings will be scoped down to least-privilege
            #
            #    (see https://github.com/aws/aws-rfdk/blob/release/packages/aws-rfdk/lib/deadline/README.md#render-queue-subnet-placement)
            #
            # 2. The load balancer can scale to use IP addresses in the subnet without conflicts from other AWS
            #    resources
            #
            #    (see https://docs.aws.amazon.com/elasticloadbalancing/latest/application/application-load-balancers.html#subnets-load-balancer)
            vpc_subnets_alb=SubnetSelection(
                subnet_group_name=subnets.RENDER_QUEUE_ALB.name),
            images=images,
            repository=repository,
            hostname=RenderQueueHostNameProps(hostname='renderqueue',
                                              zone=props.dns_zone),
            traffic_encryption=RenderQueueTrafficEncryptionProps(
                external_tls=RenderQueueExternalTLSProps(
                    rfdk_certificate=server_cert),
                internal_protocol=ApplicationProtocol.HTTPS),
            version=self.version,
            # TODO - Evaluate deletion protection for your own needs. This is set to false to
            # cleanly remove everything when this stack is destroyed. If you would like to ensure
            # that this resource is not accidentally deleted, you should set this to true.
            deletion_protection=False,
            # Enable a local transparent filesystem cache of the Repository filesystem to reduce
            # data traffic from the Repository's filesystem.
            # For an EFS and NFS filesystem, this requires the 'fsc' mount option.
            enable_local_file_caching=True,
        )
        self.render_queue.connections.allow_default_port_from(self.bastion)

        # This is an optional feature that will set up your EC2 instances to be enabled for use with
        # the Session Manager. RFDK deploys EC2 instances that aren't available through a public subnet,
        # so connecting to them by SSH isn't easy. This is an option to quickly access hosts without
        # using a bastion instance.
        # It's important to note that the permissions need to be granted to the render queue's ASG,
        # rather than the render queue itself.
        SessionManagerHelper.grant_permissions_to(self.render_queue.asg)

        if props.ubl_licenses:
            if not props.ubl_certs_secret_arn:
                raise ValueError(
                    'UBL certificates secret ARN is required when using UBL but was not specified.'
                )
            ubl_cert_secret = Secret.from_secret_arn(
                self, 'ublcertssecret', props.ubl_certs_secret_arn)
            self.ubl_licensing = UsageBasedLicensing(
                self,
                'UsageBasedLicensing',
                vpc=props.vpc,
                vpc_subnets=SubnetSelection(
                    subnet_group_name=subnets.USAGE_BASED_LICENSING.name),
                images=images,
                licenses=props.ubl_licenses,
                render_queue=self.render_queue,
                certificate_secret=ubl_cert_secret,
            )

            # Another optional usage of the SessionManagerHelper that demonstrates how to configure the UBL
            # construct's ASG for access. Note that this construct also requires you to apply the permissions
            # to its ASG property.
            SessionManagerHelper.grant_permissions_to(self.ubl_licensing.asg)
        else:
            self.ubl_licensing = None
Example No. 22
 def period_to_check_error_count() -> Duration:
     return Duration.minutes(Config.update_function_rate.to_minutes() *
                             Config.error_count_to_notify * 2)
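With the defaults from the Config class in Example No. 10 (a 5-minute update rate and an error threshold of 12), this evaluates to a 120-minute window; a quick worked check, assuming those defaults:

# Worked check of the defaults shown in Example No. 10 (assumed CDK v1 import path).
from aws_cdk.core import Duration

period = Duration.minutes(5 * 12 * 2)  # update rate (5 min) * error_count_to_notify (12) * 2
assert period.to_minutes() == 120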
Example No. 23
    def __init__(self, scope: Construct, stack_id: str, *, props: ServiceTierProps, **kwargs):
        """
        Initialize a new instance of ServiceTier
        :param scope: The scope of this construct.
        :param stack_id: The ID of this construct.
        :param props: The properties for this construct.
        :param kwargs: Any kwargs that need to be passed on to the parent class.
        """
        super().__init__(scope, stack_id, **kwargs)

        self.version = VersionQuery(
            self,
            'Version',
            version=props.deadline_version
        )

        # We are excluding the local zones from the Repository. This construct will create an
        # EFS filesystem and DocDB cluster, both of which aren't available in any local zones at this time.
        repository_subnets = SubnetSelection(
            availability_zones=props.availability_zones,
            subnet_type=SubnetType.PRIVATE
        )
        repository = Repository(
            self,
            'Repository',
            vpc=props.vpc,
            repository_installation_timeout=Duration.minutes(20),
            removal_policy=RepositoryRemovalPolicies(
                database=RemovalPolicy.DESTROY,
                filesystem=RemovalPolicy.DESTROY,
            ),
            version=self.version,
            vpc_subnets=repository_subnets
        )

        images = ThinkboxDockerImages(
            self,
            'Images',
            version=self.version,
            user_aws_thinkbox_eula_acceptance=props.accept_aws_thinkbox_eula
        )

        server_cert = X509CertificatePem(
            self,
            'RQCert',
            subject=DistinguishedName(
                cn=f'renderqueue.{props.dns_zone.zone_name}',
                o='RFDK-Sample',
                ou='RenderQueueExternal'
            ),
            signing_certificate=props.root_ca
        )

        # The render queue is also put only in the standard availability zones. The service itself
        # is run in a single zone, while the load balancer that sits in front of it can be provided
        # all the standard zones we're using.
        render_queue_subnets = SubnetSelection(
            availability_zones=[props.availability_zones[0]],
            subnet_type=SubnetType.PRIVATE
        )
        render_queue_alb_subnets = SubnetSelection(
            availability_zones=props.availability_zones,
            subnet_type=SubnetType.PRIVATE,
            one_per_az=True,
        )
        self.render_queue = RenderQueue(
            self,
            'RenderQueue',
            vpc=props.vpc,
            images=images,
            repository=repository,
            hostname=RenderQueueHostNameProps(
                hostname='renderqueue',
                zone=props.dns_zone
            ),
            traffic_encryption=RenderQueueTrafficEncryptionProps(
                external_tls=RenderQueueExternalTLSProps(
                    rfdk_certificate=server_cert
                ),
                internal_protocol=ApplicationProtocol.HTTPS
            ),
            version=self.version,
            vpc_subnets=render_queue_subnets,
            vpc_subnets_alb=render_queue_alb_subnets,
            deletion_protection=False
        )
        SessionManagerHelper.grant_permissions_to(self.render_queue.asg)
Example No. 24
    def create_jwt_secret(
        self,
        master_secret: secretsmanager.Secret,
        ica_base_url: str,
        key_name: str,
        project_ids: Union[str, List[str]],
    ) -> Tuple[secretsmanager.Secret, lambda_.Function]:
        """
        Create a JWT holding secret - that will use the master secret for JWT making - and which will have
        broad permissions to be read by all roles.

        Args:
            master_secret: the master secret to read for the API key for JWT making
            ica_base_url: the base url of ICA to be passed on to the rotators
            key_name: a unique string that we use to name this JWT secret
            project_ids: *either* a single string or a list of string - the choice of type *will* affect
                         the resulting secret output i.e a string input will end up different to a list with one string!

        Returns:
            the JWT secret
        """
        dirname = os.path.dirname(__file__)
        filename = os.path.join(dirname, "runtime/jwt_producer")

        env = {
            "MASTER_ARN": master_secret.secret_arn,
            "ICA_BASE_URL": ica_base_url,
        }

        # tell our single lambda which mode to run in - it can produce either a single JWT
        # or a dictionary of JWTs keyed by project id
        if isinstance(project_ids, list):
            env["PROJECT_IDS"] = " ".join(project_ids)
        else:
            env["PROJECT_ID"] = project_ids

        jwt_producer = lambda_.Function(
            self,
            "JwtProduce" + key_name,
            runtime=lambda_.Runtime.PYTHON_3_8,
            code=lambda_.AssetCode(filename),
            handler="lambda_entrypoint.main",
            timeout=Duration.minutes(1),
            environment=env,
        )

        # let the JWT producer's lambda role attempt to read the master secret
        # (this is only one part of the permission decision - the secret's resource policy must allow it too)
        master_secret.grant_read(jwt_producer)

        # secret itself - no default value as it will eventually get replaced by the JWT
        jwt_secret = secretsmanager.Secret(
            self,
            "Jwt" + key_name,
            secret_name=key_name,
            description="JWT(s) providing access to ICA projects",
        )

        # the rotation function that creates JWTs
        jwt_secret.add_rotation_schedule(
            "JwtSecretRotation",
            automatically_after=Duration.days(ROTATION_DAYS),
            rotation_lambda=jwt_producer,
        )

        return jwt_secret, jwt_producer
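A sketch of how create_jwt_secret might be called from the enclosing stack; the master secret, base URL, key names, and project ids below are placeholders rather than values from the original code:

        # Hypothetical call sites for create_jwt_secret; every literal here is a placeholder.
        master = secretsmanager.Secret(self, "IcaMasterApiKey")

        # A single project id string -> the rotated secret holds one JWT.
        single_jwt_secret, single_producer = self.create_jwt_secret(
            master_secret=master,
            ica_base_url="https://example.invalid/ica",
            key_name="IcaJwtSingle",
            project_ids="project-a",
        )

        # A list of project ids -> the rotated secret holds a dictionary of JWTs,
        # even if the list contains only one element.
        multi_jwt_secret, multi_producer = self.create_jwt_secret(
            master_secret=master,
            ica_base_url="https://example.invalid/ica",
            key_name="IcaJwtMulti",
            project_ids=["project-a", "project-b"],
        )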
Example #25
    def __init__(self, scope: core.App, id: str, props, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        subnets = []
        subnets.append(
            aws_ec2.SubnetConfiguration(name="DeviceSubnet",
                                        subnet_type=aws_ec2.SubnetType.PUBLIC,
                                        cidr_mask=24))

        vpc = aws_ec2.Vpc(self,
                          "DeviceVpc",
                          max_azs=2,
                          subnet_configuration=subnets)

        # Select the public subnets (used later to place the Fargate task)
        selection = vpc.select_subnets(subnet_type=aws_ec2.SubnetType.PUBLIC)

        sg = aws_ec2.SecurityGroup(
            self,
            id="FarGateSecGroup",
            vpc=vpc,
            allow_all_outbound=True,
            description="Allow access to virtual device",
            security_group_name="Virtual Device Security Group")

        sg.add_ingress_rule(peer=aws_ec2.Peer.any_ipv4(),
                            connection=aws_ec2.Port.tcp(80))

        rnd_suffix = create_random_name(4).lower()

        # pipeline requires versioned bucket
        bucket = aws_s3.Bucket(self,
                               "SourceBucket",
                               bucket_name="{}-{}-{}".format(
                                   props['namespace'].lower(),
                                   core.Aws.ACCOUNT_ID, core.Aws.REGION),
                               versioned=True,
                               removal_policy=core.RemovalPolicy.DESTROY)

        # ssm parameter to get bucket name later
        bucket_param = aws_ssm.StringParameter(
            self,
            "ParameterBucketName",
            parameter_name=f"{props['namespace']}-bucket",
            string_value=bucket.bucket_name,
            description='IoT playground pipeline bucket')

        # ecr repo to push docker container into
        ecr = aws_ecr.Repository(self,
                                 "ECR",
                                 repository_name=f"{props['namespace']}",
                                 removal_policy=core.RemovalPolicy.DESTROY)

        # codebuild project meant to run in pipeline
        cb_docker_build = aws_codebuild.PipelineProject(
            self,
            "DockerBuild",
            project_name=f"{props['namespace']}-Docker-Build",
            build_spec=aws_codebuild.BuildSpec.from_source_filename(
                filename='docker/docker_build_buildspec.yml'),
            environment=aws_codebuild.BuildEnvironment(privileged=True, ),

            # pass the ecr repo uri into the codebuild project so codebuild knows where to push
            environment_variables={
                'ecr':
                aws_codebuild.BuildEnvironmentVariable(
                    value=ecr.repository_uri),
                'tag':
                aws_codebuild.BuildEnvironmentVariable(value='virtual_device')
            },
            description='Pipeline for CodeBuild',
            timeout=core.Duration.minutes(10),
        )
        # codebuild iam permissions to read write s3
        bucket.grant_read_write(cb_docker_build)

        # codebuild permissions to interact with ecr
        ecr.grant_pull_push(cb_docker_build)

        ecs_cluster = aws_ecs.Cluster(self, 'DeviceCluster', vpc=vpc)

        fargate_task_def = aws_ecs.FargateTaskDefinition(
            self,
            'DeviceTaskDef',
            cpu=512,
            memory_limit_mib=1024
            #network_mode=aws_ecs.NetworkMode.AWS_VPC,
        )

        # fargate_task_def.add_to_task_role_policy(aws_iam.PolicyStatement(
        #     effect=aws_iam.Effect.ALLOW,
        #     actions=[
        #         "s3:PutObject"],
        #     resources=["*"]
        # ))

        fargate_task_def.add_to_execution_role_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    actions=[
                                        "ecr:GetAuthorizationToken",
                                        "ecr:BatchCheckLayerAvailability",
                                        "ecr:GetDownloadUrlForLayer",
                                        "ecr:BatchGetImage",
                                        "logs:CreateLogStream",
                                        "logs:PutLogEvents"
                                    ],
                                    resources=["*"]))

        container_image = aws_ecs.EcrImage(repository=ecr,
                                           tag="virtual_device")

        logging = aws_ecs.AwsLogDriver(stream_prefix="virtual_device")

        container = fargate_task_def.add_container("DeviceContainer",
                                                   image=container_image,
                                                   cpu=512,
                                                   memory_limit_mib=1024,
                                                   logging=logging,
                                                   essential=True)

        port_mapping = aws_ecs.PortMapping(container_port=80,
                                           host_port=80,
                                           protocol=aws_ecs.Protocol.TCP)

        container.add_port_mappings(port_mapping)

        # DynamoDB table that stores the device factory catalog
        table = aws_dynamodb.Table(self,
                                   "DeviceFactoryCatalog",
                                   partition_key=aws_dynamodb.Attribute(
                                       name="id",
                                       type=aws_dynamodb.AttributeType.STRING),
                                   read_capacity=3,
                                   write_capacity=3)

        function = aws_lambda.Function(
            self,
            "DeviceFactoryLambda",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            handler="lambda_function.lambda_handler",
            code=aws_lambda.Code.asset("../lambdas/device_factory_lambda"),
            timeout=Duration.minutes(1))

        function.add_environment("BUCKET_NAME", bucket.bucket_name)
        function.add_environment("ECS_CLUSTER", ecs_cluster.cluster_name)
        function.add_environment("ECS_TASK_DEF",
                                 fargate_task_def.task_definition_arn)
        function.add_environment("DDB_TABLE_DEVICE_CATALOG", table.table_name)
        function.add_environment("SUBNET_1", selection.subnets[0].subnet_id)
        function.add_environment("SUBNET_2", selection.subnets[1].subnet_id)
        function.add_environment("SEC_GROUP", sg.security_group_id)

        table.grant_read_write_data(function)

        function.add_to_role_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    actions=["iot:*"],
                                    resources=["*"]))

        function.add_to_role_policy(
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                actions=["s3:PutObject", "s3:GetObject"],
                resources=["{}/*".format(bucket.bucket_arn)]))

        function.add_to_role_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    actions=["iam:PassRole"],
                                    resources=["arn:aws:iam::*:role/*"]))

        function.add_to_role_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    actions=["ecs:RunTask", "ecs:StopTask"],
                                    resources=["*"]))

        api_gtw = aws_apigateway.LambdaRestApi(
            self,
            id="DeviceFactoryApi",
            rest_api_name="DeviceFactoryApi",
            handler=function)

        # ssm parameter to get api endpoint later
        api_endpoint_param = aws_ssm.StringParameter(
            self,
            "ParameterDeviceFactoryEndpoint",
            parameter_name=f"{props['namespace']}-devicefactoryendpoint",
            string_value=api_gtw.url,
            description='IoT playground device factory endpoint')

        # ssm parameter to get ecr uri later
        ecr_uri_param = aws_ssm.StringParameter(
            self,
            "ParameterEcrUri",
            parameter_name=f"{props['namespace']}-ecruri",
            string_value=ecr.repository_uri,
            description='IoT playground ECR URI')

        # ssm parameter to get cluster name later
        cluster_name_param = aws_ssm.StringParameter(
            self,
            "ParameterClusterName",
            parameter_name=f"{props['namespace']}-clustername",
            string_value=ecs_cluster.cluster_name,
            description='IoT playground Cluster Name')

        core.CfnOutput(
            self,
            "EcrUri",
            description="ECR URI",
            value=ecr.repository_uri,
        )

        core.CfnOutput(self,
                       "S3Bucket",
                       description="S3 Bucket",
                       value=bucket.bucket_name)

        core.CfnOutput(self,
                       "DeviceFactoryEndpoint",
                       description="Device Factory Endpoint",
                       value=api_gtw.url)

        self.output_props = props.copy()
        self.output_props['bucket'] = bucket
        self.output_props['cb_docker_build'] = cb_docker_build
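The stack copies its input props and exposes the source bucket and the CodeBuild project for downstream stacks. Here is a sketch of how an app might chain the stacks through output_props; the DeviceBaseStack and PipelineStack class names are assumptions used only for illustration:

app = core.App()

# Hypothetical stack class names; only output_props comes from the original code.
base = DeviceBaseStack(app, "iot-device-base", props={'namespace': 'iot-playground'})

# A downstream stack that could build the CodePipeline from the versioned bucket
# and the CodeBuild project exposed above.
pipeline = PipelineStack(app, "iot-device-pipeline", props=base.output_props)
pipeline.add_dependency(base)

app.synth()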
Example #26
    def create_event_handling(
        self,
        secrets: List[secretsmanager.Secret],
        slack_host_ssm_name: str,
        slack_webhook_ssm_name: str,
    ) -> lambda_.Function:
        """

        Args:
            secrets: a list of secrets that we will track for events
            slack_host_ssm_name: the SSM parameter name for the slack host
            slack_webhook_ssm_name: the SSM parameter name for the slack webhook id

        Returns:
            a lambda event handler
        """
        dirname = os.path.dirname(__file__)
        filename = os.path.join(dirname, "runtime/notify_slack")

        env = {
            # for the moment we don't parametrise these at the CDK level; that is only needed if they are liable to change
            "SLACK_HOST_SSM_NAME": slack_host_ssm_name,
            "SLACK_WEBHOOK_SSM_NAME": slack_webhook_ssm_name,
        }

        notifier = lambda_.Function(
            self,
            "NotifySlack",
            runtime=lambda_.Runtime.PYTHON_3_8,
            code=lambda_.AssetCode(filename),
            handler="lambda_entrypoint.main",
            timeout=Duration.minutes(1),
            environment=env,
        )

        get_ssm_policy = PolicyStatement()

        # there is some weirdness around SSM parameter ARN formation and leading slashes; the parameters
        # we use do have a leading slash, so we rely on that and guard against anything else here
        if not slack_webhook_ssm_name.startswith(
                "/") or not slack_host_ssm_name.startswith("/"):
            raise ValueError(
                "SSM parameter names need to start with a leading slash")

        # note: the *required* slash between "parameter" and the actual name is supplied by the leading
        # slash of the name itself - which is arguably wrong, but works for names with a leading slash
        get_ssm_policy.add_resources(
            f"arn:aws:ssm:*:*:parameter{slack_host_ssm_name}")
        get_ssm_policy.add_resources(
            f"arn:aws:ssm:*:*:parameter{slack_webhook_ssm_name}")
        get_ssm_policy.add_actions("ssm:GetParameter")

        notifier.add_to_role_policy(get_ssm_policy)

        # we want a rule that traps all the rotation failures for our JWT secrets
        rule = Rule(
            self,
            "NotifySlackRule",
        )

        rule.add_event_pattern(
            source=["aws.secretsmanager"],
            detail={
                # at the moment only interested in these - add extra events into this array if wanting more
                "eventName": ["RotationFailed", "RotationSucceeded"],
                "additionalEventData": {
                    "SecretId": list(map(lambda s: s.secret_arn, secrets))
                },
            },
        )

        rule.add_target(LambdaFunction(notifier))

        return notifier
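A sketch of how create_event_handling might be wired up against the JWT secrets produced elsewhere in this stack; the secret variables and SSM parameter names are placeholders, not values from the original code:

        # Hypothetical wiring of create_event_handling; the secrets and SSM parameter
        # names below are placeholders rather than values from the original code.
        self.create_event_handling(
            secrets=[jwt_secret_a, jwt_secret_b],  # secrets returned earlier by create_jwt_secret
            slack_host_ssm_name="/slack/host",  # must start with a leading slash
            slack_webhook_ssm_name="/slack/webhook/id",
        )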
Example #27
    def __init__(self, app: core.App, id: str, props, **kwargs) -> None:
        super().__init__(app, id, **kwargs)

        #cloud9 - Created manually because of permissions
        # cloud9 = aws_cloud9.CfnEnvironmentEC2(self, "IoT_Lab_Cloud9",
        #     instance_type="t2.small",
        #     automatic_stop_time_minutes=30,
        #     description="A Cloud9 env for doing the workshop"  #,
        #     #owner_arn="arn:aws:iam::*:user/virginia"
        # )

        # SNS
        topic_dda = aws_sns.Topic(self, "SNS_DDA_Findings",
                                  display_name="DDA_Findings",
                                  topic_name="DDA_Findings")

        role_dda = aws_iam.Role(self, 'DDA_to_SNS_Role',
            assumed_by=aws_iam.ServicePrincipal('iot.amazonaws.com')
        )
        
        policy_stmt_dda = aws_iam.PolicyStatement(
            actions=["sns:Publish"],
            resources=[topic_dda.topic_arn]
        )

        role_dda.add_to_policy(policy_stmt_dda)

        #permissive iot policy
        # permissive_policy = {
        #     "Version": "2012-10-17",
        #    "Statement": [{ 
        #       "Effect": "Allow",
        #       "Action": [
        #         "iot:Connect",
        #         "iot:Publish",
        #         "iot:Subscribe",
        #         "iot:Receive"
        #       ],
        #       "Resource": "*"
        #    }]
        # }

        # Removed. It will be created during the workshop
        #aws_iot.CfnPolicy(self, 
        #    "PermissivePolicy",
        #    policy_document=permissive_policy,
        #    policy_name="AUDIT_WS_PermisivePolicy")

        # Lambda
        function_dda = aws_lambda.Function(self, "DDA_FindingsProcessorLambda",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            handler="lambda_function.lambda_handler",
            code=aws_lambda.Code.asset("../lambdas/dda_findings_processor_lambda"),
            timeout=Duration.minutes(5))

        function_dda.add_environment("LOG_LEVEL", "DEBUG")
        function_dda.add_environment("TARGET_POLICY_NAME_PREFIX", TARGET_POLICY_NAME_PREFIX)
        
        # PolicyName: DeviceDefenderListResultsPolicy
        function_dda.add_to_role_policy(aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                #"iot:DescribeAccountAuditConfiguration",
                #"iot:DescribeAuditTask",
                #"iot:ListAuditTasks",
                #"iot:ListScheduledAudits",
                "iot:ListAuditFindings"
            ],
            resources=["*"]
        ))

        # PolicyName: IoTUpdatePolicy
        function_dda.add_to_role_policy(aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "iot:CreatePolicyVersion",
                "iot:DeletePolicyVersion",
                "iot:ListPolicyVersions",
                "iot:SetDefaultPolicyVersion"
            ],
            resources=["arn:aws:iot:*:*:policy/{}*".format(TARGET_POLICY_NAME_PREFIX)]
        ))

        #function_dda.add_to_role_policy(aws_iam.PolicyStatement(
        #    effect=aws_iam.Effect.ALLOW,
        #    actions=["s3:PutObject", "s3:GetObject"],
        #    resources=["arn:aws:s3:::dreis-sandbox-temp/*"]
        #))

        #function_dda.add_to_role_policy(aws_iam.PolicyStatement(
        #    effect=aws_iam.Effect.ALLOW,
        #    actions=["iam:PassRole"],
        #    resources=["arn:aws:iam::*:role/*"]
        #))

        role_iot_logging = aws_iam.Role(self, 'AWSIoTLogging_Role',
            assumed_by=aws_iam.ServicePrincipal('iot.amazonaws.com')
        )
        
        policy_iot_logging = aws_iam.PolicyStatement(
            actions=[
                "logs:CreateLogGroup", 
                "logs:CreateLogStream",
                "logs:PutLogEvents",
                "logs:PutMetricFilter",
                "logs:PutRetentionPolicy"
            ],
            resources=["arn:aws:logs:*:*:log-group:*:log-stream:*"]
        )

        role_iot_logging.add_to_policy(policy_iot_logging)

        # SNS
        topic_ddd = aws_sns.Topic(self, "SNS_DDD_Alerts",
                                  display_name="DDD_Alerts",
                                  topic_name="DDD_Alerts")

        role_ddd = aws_iam.Role(self, 'DDD_to_SNS_Role',
            assumed_by=aws_iam.ServicePrincipal('iot.amazonaws.com')
        )
        
        policy_stmt_ddd = aws_iam.PolicyStatement(
            actions=["sns:Publish"],
            resources=[topic_ddd.topic_arn]
        )

        role_ddd.add_to_policy(policy_stmt_ddd)

        # Lambda
        function_ddd = aws_lambda.Function(self, "ddd_alertsprocessorLambda",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            handler="lambda_function.lambda_handler",
            code=aws_lambda.Code.asset("../lambdas/ddd_alrts_processor_lambda"),
            timeout=Duration.minutes(5))

        function_ddd.add_environment("LOG_LEVEL", "DEBUG")

        # PolicyName: IoTUpdatePolicy
        function_ddd.add_to_role_policy(aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "iot:CreateThingGroup", # Required for WA
                "iot:AddThingToThingGroup",
                "iot:UpdateThingGroupsForThing",
                "iot:UpdateThingShadow"
            ],
            resources=["arn:aws:iot:*:*:*"]
        ))

        
        # cfn output
        # core.CfnOutput(
        #     self, "PipelineOut",
        #     description="Pipeline",
        #     value=pipeline.pipeline_name
        # )
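The example creates the findings and alerts topics and their processing Lambdas, but the subscriptions that connect them are not shown. A minimal sketch of that wiring, assuming the aws_sns_subscriptions module is also imported (it is not imported in the original snippet):

        # Hypothetical subscriptions wiring the topics to their processor Lambdas;
        # requires `from aws_cdk import aws_sns_subscriptions`, which the original does not import.
        topic_dda.add_subscription(
            aws_sns_subscriptions.LambdaSubscription(function_dda))
        topic_ddd.add_subscription(
            aws_sns_subscriptions.LambdaSubscription(function_ddd))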
Example #28
    def __init__(self,
                 scope: Construct,
                 id: str,
                 elasticsearch_index: ElasticsearchIndexResource,
                 dynamodb_table: Table,
                 kms_key: Optional[Key] = None,
                 *,
                 sagemaker_endpoint_name: Optional[str] = None,
                 sagemaker_endpoint_arn: Optional[str] = None,
                 sagemaker_embeddings_key: Optional[str] = None) -> None:
        super().__init__(scope=scope, id=id)

        elasticsearch_layer = BElasticsearchLayer(
            scope=self, name=f"{id}ElasticsearchLayer")

        if bool(sagemaker_endpoint_name) ^ bool(sagemaker_embeddings_key):
            raise ValueError(
                'In order to use sentence embedding, both of the following environment variables are required: '
                'SAGEMAKER_ENDPOINT_NAME and SAGEMAKER_EMBEDDINGS_KEY. '
                'Otherwise, provide neither of them.')

        if sagemaker_endpoint_name and not sagemaker_endpoint_arn:
            sagemaker_endpoint_arn = self.__resolve_sagemaker_endpoints_arn(
                '*')

        optional_sagemaker_parameters = {
            'SAGEMAKER_ENDPOINT_NAME': sagemaker_endpoint_name or None,
            'SAGEMAKER_EMBEDDINGS_KEY': sagemaker_embeddings_key or None
        }

        initial_cloner_function = SingletonFunction(
            scope=self,
            id='InitialClonerFunction',
            uuid='e01116a4-f939-43f2-8f5b-cc9f862c9e01',
            lambda_purpose='InitialClonerSingletonLambda',
            code=Code.from_asset(initial_cloner_root),
            handler='index.handler',
            runtime=Runtime.PYTHON_3_8,
            layers=[elasticsearch_layer],
            log_retention=RetentionDays.ONE_MONTH,
            memory_size=128,
            timeout=Duration.minutes(15),
            role=Role(
                scope=self,
                id='InitialClonerFunctionRole',
                assumed_by=ServicePrincipal('lambda.amazonaws.com'),
                inline_policies={
                    'LogsPolicy':
                    PolicyDocument(statements=[
                        PolicyStatement(
                            actions=[
                                'logs:CreateLogGroup',
                                'logs:CreateLogStream',
                                'logs:PutLogEvents',
                                'logs:DescribeLogStreams',
                            ],
                            resources=['arn:aws:logs:*:*:*'],
                            effect=Effect.ALLOW,
                        )
                    ]),
                    'ElasticsearchPolicy':
                    PolicyDocument(statements=[
                        PolicyStatement(
                            actions=[
                                'es:ESHttpDelete',
                                'es:ESHttpGet',
                                'es:ESHttpHead',
                                'es:ESHttpPatch',
                                'es:ESHttpPost',
                                'es:ESHttpPut',
                            ],
                            resources=['*'],
                            effect=Effect.ALLOW,
                        )
                    ]),
                    'DynamodbPolicy':
                    PolicyDocument(statements=[
                        PolicyStatement(
                            actions=['dynamodb:*'],
                            resources=['*'],
                            effect=Effect.ALLOW,
                        )
                    ]),
                },
                description='Role for DynamoDB Initial Cloner Function',
            ),
        )

        if kms_key:
            initial_cloner_function.add_to_role_policy(
                PolicyStatement(
                    actions=['kms:Decrypt'],
                    resources=[kms_key.key_arn],
                    effect=Effect.ALLOW,
                ), )

        initial_cloner = CustomResource(
            scope=self,
            id='InitialCloner',
            service_token=initial_cloner_function.function_arn,
            removal_policy=RemovalPolicy.DESTROY,
            properties={
                'DynamodbTableName':
                dynamodb_table.table_name,
                'ElasticsearchIndexName':
                elasticsearch_index.index_name,
                'ElasticsearchEndpoint':
                elasticsearch_index.elasticsearch_domain.domain_endpoint,
            },
            resource_type='Custom::ElasticsearchInitialCloner',
        )

        primary_key_field = initial_cloner.get_att_string('PrimaryKeyField')

        dynamodb_stream_arn = dynamodb_table.table_stream_arn
        if not dynamodb_stream_arn:
            raise Exception('DynamoDB streams must be enabled for the table')

        dynamodb_event_source = DynamoEventSource(
            table=dynamodb_table,
            starting_position=StartingPosition.LATEST,
            enabled=True,
            max_batching_window=Duration.seconds(10),
            bisect_batch_on_error=True,
            parallelization_factor=2,
            batch_size=1000,
            retry_attempts=10,
        )

        cloner_inline_policies = {
            'LogsPolicy':
            PolicyDocument(statements=[
                PolicyStatement(
                    actions=[
                        'logs:CreateLogGroup',
                        'logs:CreateLogStream',
                        'logs:PutLogEvents',
                        'logs:DescribeLogStreams',
                    ],
                    resources=['arn:aws:logs:*:*:*'],
                    effect=Effect.ALLOW,
                )
            ]),
            'ElasticsearchPolicy':
            PolicyDocument(statements=[
                PolicyStatement(
                    actions=[
                        'es:ESHttpDelete',
                        'es:ESHttpGet',
                        'es:ESHttpHead',
                        'es:ESHttpPatch',
                        'es:ESHttpPost',
                        'es:ESHttpPut',
                    ],
                    resources=[
                        f'{elasticsearch_index.elasticsearch_domain.domain_arn}/*'
                    ],
                    effect=Effect.ALLOW,
                )
            ]),
            'DynamodbStreamsPolicy':
            PolicyDocument(statements=[
                PolicyStatement(
                    actions=[
                        'dynamodb:DescribeStream',
                        'dynamodb:GetRecords',
                        'dynamodb:GetShardIterator',
                        'dynamodb:ListStreams',
                    ],
                    resources=[dynamodb_stream_arn],
                    effect=Effect.ALLOW,
                )
            ]),
        }

        if sagemaker_endpoint_arn:
            cloner_inline_policies['SagemakerPolicy'] = PolicyDocument(
                statements=[
                    PolicyStatement(actions=['sagemaker:InvokeEndpoint'],
                                    resources=[sagemaker_endpoint_arn],
                                    effect=Effect.ALLOW)
                ])

        cloner_function = Function(
            scope=self,
            id='ClonerFunction',
            code=Code.from_asset(cloner_root),
            handler='index.handler',
            runtime=Runtime.PYTHON_3_8,
            environment={
                'ES_INDEX_NAME': elasticsearch_index.index_name,
                'ES_DOMAIN_ENDPOINT':
                elasticsearch_index.elasticsearch_domain.domain_endpoint,
                'PRIMARY_KEY_FIELD': primary_key_field,
                # Only include the SageMaker variables when both of them are set.
                **(optional_sagemaker_parameters
                   if all(optional_sagemaker_parameters.values()) else {})
            },
            events=[dynamodb_event_source],
            layers=[elasticsearch_layer],
            log_retention=RetentionDays.ONE_MONTH,
            memory_size=128,
            role=Role(
                scope=self,
                id='ClonerFunctionRole',
                assumed_by=ServicePrincipal('lambda.amazonaws.com'),
                inline_policies=cloner_inline_policies,
                description='Role for DynamoDB Cloner Function',
            ),
            timeout=Duration.seconds(30),
        )

        if kms_key:
            cloner_function.add_to_role_policy(
                PolicyStatement(
                    actions=['kms:Decrypt'],
                    resources=[kms_key.key_arn],
                    effect=Effect.ALLOW,
                ))
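A sketch of how the construct above might be instantiated from a stack; the DynamodbToElasticsearchCloner class name and the table and index variables are assumptions, since the original snippet does not show them:

        # Hypothetical usage of the construct above inside a stack; the class name
        # and the table/index variables are assumptions, not shown in the original code.
        DynamodbToElasticsearchCloner(
            self,
            'ProductsCloner',
            elasticsearch_index=products_index,  # an ElasticsearchIndexResource
            dynamodb_table=products_table,       # a Table with streams enabled
            # Optional: enable sentence embeddings via a SageMaker endpoint.
            sagemaker_endpoint_name='sentence-embedding-endpoint',
            sagemaker_embeddings_key='embeddings',
        )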
Example #29
    def __init__(self, scope: core.Construct, id: str, application_prefix: str,
                 suffix: str, kda_role: Role, **kwargs):
        super().__init__(scope, id, **kwargs)

        stack = Stack.of(self)
        region = stack.region

        # Create Cognito User Pool
        self.__user_pool = CfnUserPool(
            scope=self,
            id='UserPool',
            admin_create_user_config={'allowAdminCreateUserOnly': True},
            policies={'passwordPolicy': {
                'minimumLength': 8
            }},
            username_attributes=['email'],
            auto_verified_attributes=['email'],
            user_pool_name=application_prefix + '_user_pool')

        # Create a Cognito User Pool Domain using the newly created Cognito User Pool
        CfnUserPoolDomain(scope=self,
                          id='CognitoDomain',
                          domain=application_prefix + '-' + suffix,
                          user_pool_id=self.user_pool.ref)

        # Create Cognito Identity Pool
        self.__id_pool = CfnIdentityPool(
            scope=self,
            id='IdentityPool',
            allow_unauthenticated_identities=False,
            cognito_identity_providers=[],
            identity_pool_name=application_prefix + '_identity_pool')

        trust_relationship = FederatedPrincipal(
            federated='cognito-identity.amazonaws.com',
            conditions={
                'StringEquals': {
                    'cognito-identity.amazonaws.com:aud': self.id_pool.ref
                },
                'ForAnyValue:StringLike': {
                    'cognito-identity.amazonaws.com:amr': 'authenticated'
                }
            },
            assume_role_action='sts:AssumeRoleWithWebIdentity')
        # IAM role for master user
        master_auth_role = Role(scope=self,
                                id='MasterAuthRole',
                                assumed_by=trust_relationship)
        # Role for authenticated user
        limited_auth_role = Role(scope=self,
                                 id='LimitedAuthRole',
                                 assumed_by=trust_relationship)
        # Attach Role to Identity Pool
        CfnIdentityPoolRoleAttachment(
            scope=self,
            id='userPoolRoleAttachment',
            identity_pool_id=self.id_pool.ref,
            roles={'authenticated': limited_auth_role.role_arn})
        # Create master-user-group
        CfnUserPoolGroup(scope=self,
                         id='AdminsGroup',
                         user_pool_id=self.user_pool.ref,
                         group_name='master-user-group',
                         role_arn=master_auth_role.role_arn)
        # Create limited-user-group
        CfnUserPoolGroup(scope=self,
                         id='UsersGroup',
                         user_pool_id=self.user_pool.ref,
                         group_name='limited-user-group',
                         role_arn=limited_auth_role.role_arn)
        # Role for the Elasticsearch service to access Cognito
        es_role = Role(scope=self,
                       id='EsRole',
                       assumed_by=ServicePrincipal(service='es.amazonaws.com'),
                       managed_policies=[
                           ManagedPolicy.from_aws_managed_policy_name(
                               'AmazonESCognitoAccess')
                       ])

        # Use the following command line to generate the python dependencies layer content
        # pip3 install -t lambda-layer/python/lib/python3.8/site-packages -r lambda/requirements.txt
        # Build the lambda layer assets
        subprocess.call([
            'pip', 'install', '-t',
            'streaming/streaming_cdk/lambda-layer/python/lib/python3.8/site-packages',
            '-r', 'streaming/streaming_cdk/bootstrap-lambda/requirements.txt',
            '--upgrade'
        ])

        requirements_layer = _lambda.LayerVersion(
            scope=self,
            id='PythonRequirementsTemplate',
            code=_lambda.Code.from_asset(
                'streaming/streaming_cdk/lambda-layer'),
            compatible_runtimes=[_lambda.Runtime.PYTHON_3_8])

        # This lambda function will bootstrap the Elasticsearch cluster
        bootstrap_function_name = 'AESBootstrap'
        register_template_lambda = _lambda.Function(
            scope=self,
            id='RegisterTemplate',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset(
                'streaming/streaming_cdk/bootstrap-lambda'),
            handler='es-bootstrap.lambda_handler',
            environment={
                'REGION': region,
                'KDA_ROLE_ARN': kda_role.role_arn,
                'MASTER_ROLE_ARN': master_auth_role.role_arn
            },
            layers=[requirements_layer],
            timeout=Duration.minutes(15),
            function_name=bootstrap_function_name)

        lambda_role = register_template_lambda.role
        lambda_role.add_to_policy(
            PolicyStatement(
                actions=['logs:CreateLogGroup'],
                resources=[stack.format_arn(service='logs', resource='*')]))
        lambda_role.add_to_policy(
            PolicyStatement(
                actions=['logs:CreateLogStream', 'logs:PutLogEvents'],
                resources=[
                    stack.format_arn(service='logs',
                                     resource='log_group',
                                     resource_name='/aws/lambda/' +
                                     bootstrap_function_name + ':*')
                ]))

        # Let the lambda assume the master role so that actions can be executed on the cluster
        # https://aws.amazon.com/premiumsupport/knowledge-center/lambda-function-assume-iam-role/
        lambda_role.add_to_policy(
            PolicyStatement(actions=['sts:AssumeRole'],
                            resources=[master_auth_role.role_arn]))

        master_auth_role.assume_role_policy.add_statements(
            PolicyStatement(actions=['sts:AssumeRole'],
                            principals=[lambda_role]))

        # List all the roles that are allowed to access the Elasticsearch cluster.
        roles = [
            ArnPrincipal(limited_auth_role.role_arn),
            ArnPrincipal(master_auth_role.role_arn),
            ArnPrincipal(kda_role.role_arn)
        ]  # The users
        if register_template_lambda and register_template_lambda.role:
            roles.append(ArnPrincipal(
                lambda_role.role_arn))  # The lambda used to bootstrap
        # Create kms key
        kms_key = Key(scope=self,
                      id='kms-es',
                      alias='custom/es',
                      description='KMS key for Elasticsearch domain',
                      enable_key_rotation=True)

        # AES Log Groups
        es_app_log_group = logs.LogGroup(scope=self,
                                         id='EsAppLogGroup',
                                         retention=logs.RetentionDays.ONE_WEEK,
                                         removal_policy=RemovalPolicy.RETAIN)

        # Create the Elasticsearch domain
        es_domain_arn = stack.format_arn(service='es',
                                         resource='domain',
                                         resource_name=application_prefix +
                                         '/*')

        es_access_policy = PolicyDocument(statements=[
            PolicyStatement(principals=roles,
                            actions=[
                                'es:ESHttpGet', 'es:ESHttpPut',
                                'es:ESHttpPost', 'es:ESHttpDelete'
                            ],
                            resources=[es_domain_arn])
        ])
        self.__es_domain = es.CfnDomain(
            scope=self,
            id='searchDomain',
            elasticsearch_cluster_config={
                'instanceType': 'r5.large.elasticsearch',
                'instanceCount': 2,
                'dedicatedMasterEnabled': True,
                'dedicatedMasterCount': 3,
                'dedicatedMasterType': 'r5.large.elasticsearch',
                'zoneAwarenessEnabled': True,
                'zoneAwarenessConfig': {
                    'AvailabilityZoneCount': '2'
                },
            },
            encryption_at_rest_options={
                'enabled': True,
                'kmsKeyId': kms_key.key_id
            },
            node_to_node_encryption_options={'enabled': True},
            ebs_options={
                'volumeSize': 10,
                'ebsEnabled': True
            },
            elasticsearch_version='7.9',
            domain_name=application_prefix,
            access_policies=es_access_policy,
            cognito_options={
                'enabled': True,
                'identityPoolId': self.id_pool.ref,
                'roleArn': es_role.role_arn,
                'userPoolId': self.user_pool.ref
            },
            advanced_security_options={
                'enabled': True,
                'internalUserDatabaseEnabled': False,
                'masterUserOptions': {
                    'masterUserArn': master_auth_role.role_arn
                }
            },
            domain_endpoint_options={
                'enforceHttps': True,
                'tlsSecurityPolicy': 'Policy-Min-TLS-1-2-2019-07'
            },
            # log_publishing_options={
            #     # 'ES_APPLICATION_LOGS': {
            #     #     'enabled': True,
            #     #     'cloud_watch_logs_log_group_arn': es_app_log_group.log_group_arn
            #     # },
            #     # 'AUDIT_LOGS': {
            #     #     'enabled': True,
            #     #     'cloud_watch_logs_log_group_arn': ''
            #     # },
            #     # 'SEARCH_SLOW_LOGS': {
            #     #     'enabled': True,
            #     #     'cloud_watch_logs_log_group_arn': ''
            #     # },
            #     # 'INDEX_SLOW_LOGS': {
            #     #     'enabled': True,
            #     #     'cloud_watch_logs_log_group_arn': ''
            #     # }
            # }
        )

        # Not yet on the roadmap...
        # See https://github.com/aws-cloudformation/aws-cloudformation-coverage-roadmap/issues/283
        # self.es_domain.add_property_override('ElasticsearchClusterConfig.WarmEnabled', True)
        # self.es_domain.add_property_override('ElasticsearchClusterConfig.WarmCount', 2)
        # self.es_domain.add_property_override('ElasticsearchClusterConfig.WarmType', 'ultrawarm1.large.elasticsearch')

        # Deny all roles from the authentication provider - users must be added to groups explicitly.
        # This lambda function removes the default authenticated-role mapping from the identity pool.
        cognito_function_name = 'CognitoFix'
        cognito_template_lambda = _lambda.Function(
            scope=self,
            id='CognitoFixLambda',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset(
                'streaming/streaming_cdk/cognito-lambda'),
            handler='handler.handler',
            environment={
                'REGION': region,
                'USER_POOL_ID': self.__user_pool.ref,
                'IDENTITY_POOL_ID': self.__id_pool.ref,
                'LIMITED_ROLE_ARN': limited_auth_role.role_arn
            },
            timeout=Duration.minutes(15),
            function_name=cognito_function_name)

        lambda_role = cognito_template_lambda.role
        lambda_role.add_to_policy(
            PolicyStatement(
                actions=['logs:CreateLogGroup'],
                resources=[stack.format_arn(service='logs', resource='*')]))
        lambda_role.add_to_policy(
            PolicyStatement(
                actions=['logs:CreateLogStream', 'logs:PutLogEvents'],
                resources=[
                    stack.format_arn(service='logs',
                                     resource='log_group',
                                     resource_name='/aws/lambda/' +
                                     cognito_function_name + ':*')
                ]))
        lambda_role.add_to_policy(
            PolicyStatement(actions=['cognito-idp:ListUserPoolClients'],
                            resources=[self.user_pool.attr_arn]))
        lambda_role.add_to_policy(
            PolicyStatement(actions=['iam:PassRole'],
                            resources=[limited_auth_role.role_arn]))

        cognito_id_res = Fn.join(':', [
            'arn:aws:cognito-identity', stack.region, stack.account,
            Fn.join('/', ['identitypool', self.__id_pool.ref])
        ])

        lambda_role.add_to_policy(
            PolicyStatement(actions=['cognito-identity:SetIdentityPoolRoles'],
                            resources=[cognito_id_res]))

        # Get the Domain Endpoint and register it with the lambda as environment variable.
        register_template_lambda.add_environment(
            'DOMAIN', self.__es_domain.attr_domain_endpoint)

        CfnOutput(scope=self,
                  id='createUserUrl',
                  description="Create a new user in the user pool here.",
                  value="https://" + scope.region +
                  ".console.aws.amazon.com/cognito/users?region=" +
                  scope.region + "#/pool/" + self.user_pool.ref + "/users")
        CfnOutput(scope=self,
                  id='kibanaUrl',
                  description="Access Kibana via this URL.",
                  value="https://" + self.__es_domain.attr_domain_endpoint +
                  "/_plugin/kibana/")

        bootstrap_lambda_provider = Provider(
            scope=self,
            id='BootstrapLambdaProvider',
            on_event_handler=register_template_lambda)
        CustomResource(scope=self,
                       id='ExecuteRegisterTemplate',
                       service_token=bootstrap_lambda_provider.service_token,
                       properties={'Timeout': 900})

        cognito_lambda_provider = Provider(
            scope=self,
            id='CognitoFixLambdaProvider',
            on_event_handler=cognito_template_lambda)
        cognito_fix_resource = CustomResource(
            scope=self,
            id='ExecuteCognitoFix',
            service_token=cognito_lambda_provider.service_token)
        cognito_fix_resource.node.add_dependency(self.__es_domain)
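Both custom resources above are driven by the CDK Provider framework, whose on_event handler receives the CloudFormation lifecycle event and may return a PhysicalResourceId. A sketch of roughly what es-bootstrap.lambda_handler could look like; the actual bootstrap logic is not shown in the original code:

# Hypothetical shape of a Provider-framework on_event handler such as
# es-bootstrap.lambda_handler; the bootstrap logic itself is only indicated.
def lambda_handler(event, context):
    request_type = event['RequestType']  # 'Create' | 'Update' | 'Delete'

    if request_type in ('Create', 'Update'):
        # ... assume the Elasticsearch master role, register index templates, etc. ...
        pass

    # Nothing to clean up on delete in this sketch.
    return {'PhysicalResourceId': 'es-bootstrap'}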
Example #30
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # define the table that stores the Todo items
        table = aws_dynamodb.Table(self, "Table",
            partition_key=aws_dynamodb.Attribute(
                name="id",
                type=aws_dynamodb.AttributeType.STRING),
            billing_mode = aws_dynamodb.BillingMode.PAY_PER_REQUEST)
#            read_capacity=10,
#            write_capacity=5)

        # define the Lambda functions
        list_handler = aws_lambda.Function(self, "TodoListFunction",
            code=aws_lambda.Code.asset("./lambda"),
            handler="list.list",
            timeout=Duration.minutes(5),
            runtime=aws_lambda.Runtime.PYTHON_3_7)

        create_handler = aws_lambda.Function(self, "TodoCreateFunction",
            code=aws_lambda.Code.asset("./lambda"),
            handler="create.create",
            timeout=Duration.minutes(5),
            runtime=aws_lambda.Runtime.PYTHON_3_7)
        
        get_handler = aws_lambda.Function(self, "TodoGetFunction",
            code=aws_lambda.Code.asset("./lambda"),
            handler="get.get",
            timeout=Duration.minutes(5),
            runtime=aws_lambda.Runtime.PYTHON_3_7)
        
        update_handler = aws_lambda.Function(self, "TodoUpdateFunction",
            code=aws_lambda.Code.asset("./lambda"),
            handler="update.update",
            timeout=Duration.minutes(5),
            runtime=aws_lambda.Runtime.PYTHON_3_7)
        
        delete_handler = aws_lambda.Function(self, "TodoDeleteFunction",
            code=aws_lambda.Code.asset("./lambda"),
            handler="delete.delete",
            timeout=Duration.minutes(5),
            runtime=aws_lambda.Runtime.PYTHON_3_7)

        # pass the table name to each handler through an environment variable
        # and grant the handler read/write permissions on the table.
        handler_list = [
            list_handler, 
            create_handler, 
            get_handler, 
            update_handler, 
            delete_handler
        ]
        for handler in handler_list:
            handler.add_environment('DYNAMODB_TABLE', table.table_name)
            table.grant_read_write_data(handler)

        # define the API endpoint
        api = aws_apigateway.LambdaRestApi(self, "TodoApi",
            handler=list_handler,
            proxy=False)
        
        # define LambdaIntegrations
        list_lambda_integration = \
            aws_apigateway.LambdaIntegration(list_handler)
        create_lambda_integration = \
            aws_apigateway.LambdaIntegration(create_handler)
        get_lambda_integration = \
            aws_apigateway.LambdaIntegration(get_handler)
        update_lambda_integration = \
            aws_apigateway.LambdaIntegration(update_handler)
        delete_lambda_integration = \
            aws_apigateway.LambdaIntegration(delete_handler)
        
        # define the REST API model and associate methods with LambdaIntegrations
        api.root.add_method('ANY')
        todos = api.root.add_resource('todos')
        todos.add_method('GET', list_lambda_integration)     # GET /todos
        todos.add_method('POST', create_lambda_integration)  # POST /todos

        todo = todos.add_resource('{id}')
        todo.add_method('GET', get_lambda_integration)       # GET /todos/{id}
        todo.add_method('PUT', update_lambda_integration)    # PUT /todos/{id}
        todo.add_method('DELETE', delete_lambda_integration) # DELETE /todos/{id}
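A sketch of what one of the handlers under ./lambda could look like, here list.list; the original handler code is not shown, so this is only an illustrative assumption:

# Hypothetical ./lambda/list.py; the real handler code is not part of the original example.
import json
import os

import boto3

dynamodb = boto3.resource('dynamodb')


def list(event, context):
    # The table name is injected by the stack via add_environment above.
    table = dynamodb.Table(os.environ['DYNAMODB_TABLE'])
    result = table.scan()

    # Return an API Gateway proxy-style response with all Todo items.
    return {
        'statusCode': 200,
        'body': json.dumps(result.get('Items', []), default=str)
    }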