Example 1
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        self.base_platform = BasePlatform(self, self.stack_name)

        self.fargate_queue_processing_service = aws_ecs_patterns.QueueProcessingFargateService(
            self,
            "QueueProcessingService",
            service_name='queue-processing',
            cluster=self.base_platform.ecs_cluster,
            cpu=256,
            desired_task_count=0,
            max_scaling_capacity=10,
            memory_limit_mib=512,
            #image=aws_ecs.ContainerImage.from_registry("625806755153.dkr.ecr.us-east-1.amazonaws.com/python-workers:latest"),
            image=aws_ecs.ContainerImage.from_ecr_repository(
                aws_ecr.Repository.from_repository_name(
                    self, "mi_repositorio", "python-workers")),
            environment={
                "REGION": getenv('AWS_DEFAULT_REGION'),
                "AWS_ACCOUNT_ID": getenv('AWS_ACCOUNT_ID'),
            },
        )

        self.fargate_queue_processing_service.task_definition.add_to_task_role_policy(
            aws_iam.PolicyStatement(actions=['ec2:DescribeSubnets'],
                                    resources=['*']))
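These listings omit their module-level imports. A minimal sketch of what Example 1 assumes, using the CDK v1 Python packages the snippet references (BasePlatform is a project-specific construct from that example's repository and is not reproduced here):

# Imports assumed by Example 1 (CDK v1). BasePlatform is a project-specific
# construct providing the shared ECS cluster; it is defined elsewhere in the
# example's repository and only referenced here.
from os import getenv

from aws_cdk import core, aws_ecr, aws_ecs, aws_ecs_patterns, aws_iam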
Example 2
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here
        self.fargate_queue_service = aws_ecs_patterns.QueueProcessingFargateService(
            self,
            "QueueService",
            cpu=512,
            memory_limit_mib=2048,
            image=aws_ecs.ContainerImage.from_asset(
                directory='.',
                file='Dockerfile.queue_service',
                exclude=["cdk.out"]),
            desired_task_count=1)
Example 3
    def __init__(self, scope, id, cluster: ecs.Cluster,
                 tracks_table: dynamodb.Table, input_bucket: s3.Bucket,
                 output_bucket: s3.Bucket, **kwargs):
        super().__init__(scope, id, **kwargs)

        worker_dir = os.path.abspath(
            os.path.join(os.path.dirname(__file__), 'worker'))

        self.service = ecs_patterns.QueueProcessingFargateService(
            self,
            'separator-service',
            cluster=cluster,
            cpu=2048,
            memory_limit_mib=8192,
            image=ecs.ContainerImage.from_asset(directory=worker_dir),
            environment={
                'TRACKS_TABLE_NAME': tracks_table.table_name,
                'OUTPUT_BUCKET_NAME': output_bucket.bucket_name
            })

        input_bucket.grant_read(self.service.task_definition.task_role)
        output_bucket.grant_write(self.service.task_definition.task_role)
        tracks_table.grant_read_write_data(
            self.service.task_definition.task_role)
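The worker image in Example 3 comes from the worker/ directory, whose contents are not shown. As a hypothetical sketch only (none of these names appear in the original), a worker for a QueueProcessingFargateService can read the queue name from the QUEUE_NAME environment variable that the pattern injects into the container and poll it with boto3:

# Hypothetical worker loop; not part of the original example's Docker context.
import os

import boto3

sqs = boto3.client("sqs")
# The QueueProcessingFargateService pattern exposes the queue name as QUEUE_NAME.
queue_url = sqs.get_queue_url(QueueName=os.environ["QUEUE_NAME"])["QueueUrl"]

while True:
    resp = sqs.receive_message(QueueUrl=queue_url,
                               MaxNumberOfMessages=1,
                               WaitTimeSeconds=20)
    for message in resp.get("Messages", []):
        # ... process the track referenced by the message body ...
        sqs.delete_message(QueueUrl=queue_url,
                           ReceiptHandle=message["ReceiptHandle"])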
Example 4
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # API Gateway needs to have a resource policy granting the FHIR Works on AWS Lambda
        # execute permissions. The Lambda function ARN will be passed during deployment as a CDK context variable.
        # The FHIR Works Lambda will need a policy attached to its execution role
        # allowing it to invoke the API
        # From --context resource-router-lambda-role="arn:aws:iam::123456789012:role/rolename"
        imported_resource_router_lambda_role = self.node.try_get_context(
            "resource-router-lambda-role"
        )
        # The Amazon ECS on AWS Fargate container implementing the connection manager
        # will be launched into a VPC that needs to have private and public subnets
        # and a NAT gateway or instance
        # From --context vpc-id="vpc-123456"
        vpc_id = self.node.try_get_context("vpc-id")

        # The following parameters specify the name of the HL7 server
        # that will receive transformed HL7v2 messages and the TCP port
        # that it will be listening on
        # From --context hl7-server-name="hl7.example.com"
        # From --context hl7-port="2575"
        hl7_server_name = self.node.try_get_context("hl7-server-name")
        hl7_port = self.node.try_get_context("hl7-port")

        # In this proof of concept, the source of data for read interactions
        # is an S3 bucket where the mock HL7 server stores processed HL7 messages
        # From --context test-server-output-bucket-name="DOC-EXAMPLE-BUCKET"
        test_server_output_bucket_name = self.node.try_get_context(
            "test-server-output-bucket-name"
        )

        # SQS queue
        # The custom transform Lambda communicates with the Connectivity Manager using this SQS queue
        queue = sqs.Queue(
            self, f"{COMPONENT_PREFIX}Queue", encryption=sqs.QueueEncryption.KMS_MANAGED
        )

        # S3 Bucket to retrieve HL7v2 messages in proof of concept deployment
        test_server_output_bucket = s3.Bucket.from_bucket_name(
            self, f"{COMPONENT_PREFIX}OutputBucket", test_server_output_bucket_name
        )

        # Transform Lambda
        # Reference implementation of Custom Transform component of Transform Execution Environment

        transform_lambda = lambda_.Function(
            self,
            f"{COMPONENT_PREFIX}TransformLambda",
            handler="transform.handler",
            runtime=lambda_.Runtime.PYTHON_3_8,
            code=lambda_.Code.from_asset(
                path.join(dirname, "../../lambda"),
                bundling={
                    "image": lambda_.Runtime.PYTHON_3_8.bundling_docker_image,
                    "command": [
                        "bash",
                        "-c",
                        " && ".join(
                            [
                                "pip install --no-cache-dir -r requirements.txt -t /asset-output",
                                "(tar -c --exclude-from=exclude.lst -f - .)|(cd /asset-output; tar -xf -)",
                            ]
                        ),
                    ],
                },
            ),
            timeout=core.Duration.seconds(60),
            environment=dict(
                SQS_QUEUE=queue.queue_url,
                # The following parameter is optional
                S3_BUCKET_NAME=test_server_output_bucket_name,
            ),
        )
        queue.grant_send_messages(transform_lambda)

        # API Gateway with Lambda construct (using https://aws.amazon.com/solutions/constructs/patterns)
        # Reference implementation of Custom Transform component of Transform Execution Environment

        api_lambda = apigw_lambda.ApiGatewayToLambda(
            self,
            "ApiGw",
            existing_lambda_obj=transform_lambda,
            api_gateway_props=apigw.LambdaRestApiProps(
                handler=transform_lambda,
                proxy=False,
                rest_api_name=f"{COMPONENT_PREFIX_DASHES}-api",
                endpoint_export_name=f"{COMPONENT_PREFIX}ApiEndPoint",
                description=f"{COMPONENT_PREFIX} APIGW with Transform Lambda (FHIR to HL7v2)",
                default_method_options=apigw.MethodOptions(
                    authorization_type=apigw.AuthorizationType.IAM,
                ),
                policy=iam.PolicyDocument(
                    statements=[
                        iam.PolicyStatement(
                            actions=["execute-api:Invoke"],
                            effect=iam.Effect.ALLOW,
                            principals=[
                                iam.ArnPrincipal(imported_resource_router_lambda_role),
                            ],
                            resources=["execute-api:/*/*/*"],
                        )
                    ]
                ),
            ),
        )
        rest_api = api_lambda.api_gateway
        persistence = rest_api.root.add_resource("persistence")
        resource_type = persistence.add_resource("{resource_type}")
        resource_type.add_method("POST")
        resource_id = resource_type.add_resource("{id}")
        resource_id.add_method("GET")
        resource_id.add_method("PUT")
        resource_id.add_method("DELETE")

        # ECS Fargate Container (HL7v2 sender)
        # This container implements Connectivity Manager component
        # of Transform Execution Environment

        vpc = ec2.Vpc.from_lookup(self, "DefaultVpc", vpc_id=vpc_id)

        cluster = ecs.Cluster(self, f"{COMPONENT_PREFIX}Cluster", vpc=vpc)

        ecs_patterns.QueueProcessingFargateService(
            self,
            f"{COMPONENT_PREFIX}Service",
            cluster=cluster,
            image=ecs.ContainerImage.from_asset(path.join(dirname, "../../container")),
            queue=queue,
            desired_task_count=1,
            log_driver=ecs.LogDriver.aws_logs(
                stream_prefix=f"{COMPONENT_PREFIX}HL7Client",
                log_retention=logs.RetentionDays.ONE_DAY,
            ),
            environment=dict(
                SERVER_NAME=hl7_server_name,
                PORT_NUMBER=hl7_port,
            ),
        )

        # The following permission grants are needed to support
        # read interactions with the integration transform
        test_server_output_bucket.grant_read(transform_lambda)

        transform_lambda.add_to_role_policy(
            iam.PolicyStatement(
                actions=["s3:ListBucket"],
                effect=iam.Effect.ALLOW,
                resources=[test_server_output_bucket.bucket_arn],
            )
        )
        transform_lambda.add_to_role_policy(
            iam.PolicyStatement(
                actions=["s3:GetObject"],
                effect=iam.Effect.ALLOW,
                resources=[test_server_output_bucket.arn_for_objects("*")],
            )
        )

        # CloudFormation Stack outputs
        # The following outputs are needed to configure the FHIR Works on AWS API interface
        core.CfnOutput(
            self,
            "TransformApiRootUrl",
            value=rest_api.url,
            export_name="TransformApiRootUrl",
        )
        core.CfnOutput(
            self,
            "TransformApiRegion",
            value=self.region,
            export_name="TransformApiRegion",
        )
        core.CfnOutput(
            self,
            "TransformApiAccountId",
            value=self.account,
            export_name="TransformApiAccountId",
        )
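This stack imports an existing VPC with ec2.Vpc.from_lookup, which only resolves when the stack is synthesized with an explicit account and region. A minimal sketch of an app entry point, assuming a hypothetical TransformStack class name for the stack above; the context values (vpc-id, hl7-server-name, hl7-port, resource-router-lambda-role, test-server-output-bucket-name) are supplied with --context as the comments describe:

# Hypothetical app.py; TransformStack is a placeholder name for the stack above.
import os

from aws_cdk import core

app = core.App()
TransformStack(
    app,
    "TransformStack",
    # Vpc.from_lookup needs a concrete account and region at synth time;
    # CDK_DEFAULT_ACCOUNT/REGION are set by the CDK CLI.
    env=core.Environment(account=os.environ["CDK_DEFAULT_ACCOUNT"],
                         region=os.environ["CDK_DEFAULT_REGION"]),
)
app.synth()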
Example 5
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        ### S3 ###

        source_csv_bucket = _s3.Bucket(self,
                                       "BYODValidationSourceBucket",
                                       versioned=True)

        target_csv_bucket = _s3.Bucket(
            self,
            "BYODValidationTargetBucket",
            removal_policy=core.RemovalPolicy.RETAIN)

        webtool_bucket = _s3.Bucket(
            self,
            "WebToolBucket",
            website_index_document="index.html",
            website_error_document="index.html",
            public_read_access=True,
        )

        core.CfnOutput(self, "DVTRegion", value=self.region)
        core.CfnOutput(self,
                       "SourceS3Bucket",
                       value=source_csv_bucket.bucket_name)
        core.CfnOutput(self,
                       "TargetS3Bucket",
                       value=target_csv_bucket.bucket_name)
        core.CfnOutput(self,
                       "WebToolS3Bucket",
                       value=webtool_bucket.bucket_name)
        core.CfnOutput(self,
                       "WebToolUrl",
                       value=webtool_bucket.bucket_website_url)

        ### Stager Function ###
        stager_function = _lambda.Function(self,
                                           "StagerFunction",
                                           runtime=_lambda.Runtime.NODEJS_12_X,
                                           code=_lambda.Code.from_asset(
                                               os.path.join(
                                                   dirname, "lambda",
                                                   "stager")),
                                           handler='index.handler')

        stager_function.add_environment("REGION", self.region)
        stager_function.add_environment("SOURCE_BUCKET",
                                        source_csv_bucket.bucket_name)
        stager_function.add_environment("STAGE_BUCKET",
                                        target_csv_bucket.bucket_name)
        source_csv_bucket.grant_read(stager_function)
        target_csv_bucket.grant_put(stager_function)
        core.CfnOutput(self,
                       "StagerLambdaFunction",
                       value=stager_function.function_name)

        ### Profiling Queue
        profiling_job_queue = _sqs.Queue(self, "ProfilingJobQueue")
        core.CfnOutput(self,
                       "SQSProfileQueue",
                       value=profiling_job_queue.queue_url)

        ### Cognito ###

        userpool = _cognito.UserPool(self,
                                     "WebToolUserPool",
                                     user_pool_name="byod-webtool-userpool",
                                     self_sign_up_enabled=True,
                                     auto_verify={
                                         "email": True,
                                         "phone": False
                                     },
                                     user_verification={
                                         "email_subject":
                                         "Your verification code",
                                         "email_body":
                                         "Your verification code is {####}",
                                         "email_style":
                                         _cognito.VerificationEmailStyle.CODE
                                     },
                                     standard_attributes={
                                         "email": {
                                             "required": True,
                                             "mutable": False
                                         }
                                     },
                                     password_policy={})
        client = userpool.add_client(
            "webtool-app-client",
            auth_flows={
                "custom": True,
                "user_password": True,
                "user_srp": True,
                #"refresh_token": True
            })
        identity_pool = _cognito.CfnIdentityPool(
            self,
            "WebToolCognitoIdentityPool",
            allow_unauthenticated_identities=True)
        identity_pool.add_property_override(
            "CognitoIdentityProviders",
            [{
                "ClientId": client.user_pool_client_id,
                "ProviderName": userpool.user_pool_provider_name
            }])
        auth_role = _iam.Role(
            self,
            "CognitoAuthRole",
            assumed_by=WebIdentityPrincipal(
                "cognito-identity.amazonaws.com", {
                    "StringEquals": {
                        "cognito-identity.amazonaws.com:aud": identity_pool.ref
                    },
                    "ForAnyValue:StringLike": {
                        "cognito-identity.amazonaws.com:amr": "authenticated"
                    }
                }))
        auth_role.add_to_policy(
            PolicyStatement(effect=Effect.ALLOW,
                            actions=["s3:GetObject", "s3:PutObject"],
                            resources=["%s/*" % target_csv_bucket.bucket_arn]))

        auth_role.add_to_policy(
            PolicyStatement(effect=Effect.ALLOW,
                            actions=["lambda:invokeFunction"],
                            resources=[stager_function.function_arn]))

        auth_role.add_to_policy(
            PolicyStatement(effect=Effect.ALLOW,
                            actions=["sqs:*"],
                            resources=[profiling_job_queue.queue_arn]))

        unauth_role = _iam.Role(
            self,
            "CognitoUnauthRole",
            assumed_by=_iam.WebIdentityPrincipal(
                "cognito-identity.amazonaws.com",
                conditions={
                    "StringEquals": {
                        "cognito-identity.amazonaws.com:aud": identity_pool.ref
                    },
                    "ForAnyValue:StringLike": {
                        "cognito-identity.amazonaws.com:amr": "unauthenticated"
                    }
                }))
        identity_pool_policy = _cognito.CfnIdentityPoolRoleAttachment(
            self,
            "WebToolCognitoIdentityPoolPolicy",
            identity_pool_id=identity_pool.ref,
            roles={
                'unauthenticated': unauth_role.role_arn,
                'authenticated': auth_role.role_arn
            })
        core.CfnOutput(self, "UserPoolId", value=userpool.user_pool_id)
        core.CfnOutput(self, "IdentityPoolId", value=identity_pool.ref)
        core.CfnOutput(self, "ClientId", value=client.user_pool_client_id)
        core.CfnOutput(self,
                       "ProviderName",
                       value=userpool.user_pool_provider_name)

        ### DynamoDB ###

        validation_job_table = _dynamodb.Table(
            self,
            "ValidationJobTable",
            partition_key=_dynamodb.Attribute(
                name="id", type=_dynamodb.AttributeType.STRING))

        ## AppSync ###

        api = _appsync.GraphqlApi(
            self,
            "Api",
            name="validation-job-api",
            schema=_appsync.Schema.from_asset(
                os.path.join(dirname, "api", "schema.graphql")),
            authorization_config=AuthorizationConfig(
                default_authorization=AuthorizationMode(
                    authorization_type=AuthorizationType.USER_POOL,
                    user_pool_config=UserPoolConfig(user_pool=userpool))),
            log_config=LogConfig(exclude_verbose_content=False,
                                 field_log_level=FieldLogLevel.ALL))
        api_ds = api.add_dynamo_db_data_source("ValidationJobDataSource",
                                               validation_job_table)

        from aws_cdk.aws_appsync import MappingTemplate

        api_ds.create_resolver(
            type_name="Query",
            field_name="listJobss",
            request_mapping_template=MappingTemplate.from_file(
                os.path.join(dirname, "api", "resolvers",
                             "Query.listJobss.req.vtl")),
            response_mapping_template=MappingTemplate.from_file(
                os.path.join(dirname, "api", "resolvers",
                             "Query.listJobss.res.vtl")))

        api_ds.create_resolver(
            type_name="Query",
            field_name="getJobs",
            request_mapping_template=MappingTemplate.from_file(
                os.path.join(dirname, "api", "resolvers",
                             "Query.getJobs.req.vtl")),
            response_mapping_template=MappingTemplate.from_file(
                os.path.join(dirname, "api", "resolvers",
                             "Query.getJobs.res.vtl")))

        core.CfnOutput(self, "GraphQLEndpoint", value=api.graphql_url)

        ### SQS ###

        validation_job_queue = _sqs.Queue(self, "ValidationJobQueue")

        ### Lambda ###
        validation_trigger_function = _lambda.Function(
            self,
            "ValidationTriggerFunction",
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset(
                os.path.join(dirname, "lambda", "validation_trigger")),
            handler='lambda_function.lambda_handler')

        validation_trigger_function.add_environment(
            "TABLE_NAME", validation_job_table.table_name)
        validation_trigger_function.add_environment(
            "QUEUE_URL", validation_job_queue.queue_url)

        validation_trigger_function.add_event_source(
            _S3EventSource(source_csv_bucket,
                           events=[_s3.EventType.OBJECT_CREATED]))

        source_csv_bucket.grant_read(validation_trigger_function)
        validation_job_table.grant_read_write_data(validation_trigger_function)
        validation_job_queue.grant_send_messages(validation_trigger_function)

        ### ECS Fargate ###

        validation_fargate_asset = _ecr_assets.DockerImageAsset(
            self,
            "ValidationBuildImage",
            directory=os.path.join(dirname, "fargate", "validation"))
        profiling_fargate_asset = _ecr_assets.DockerImageAsset(
            self,
            "ProfilingBuildImage",
            directory=os.path.join(dirname, "fargate", "profiling"))

        vpc = _ec2.Vpc(self, "VPC", max_azs=3)
        cluster = _ecs.Cluster(self, "ECSCluster", vpc=vpc)

        validation_fargate_service = _ecs_patterns.QueueProcessingFargateService(
            self,
            "ValidationFargateService",
            cluster=cluster,
            cpu=4096,
            memory_limit_mib=30720,
            enable_logging=True,
            image=_ecs.ContainerImage.from_docker_image_asset(
                validation_fargate_asset),
            environment={
                "TABLE_NAME": validation_job_table.table_name,
                "QUEUE_URL": validation_job_queue.queue_url,
                "SOURCE_BUCKET_NAME": source_csv_bucket.bucket_name,
                "TARGET_BUCKET_NAME": target_csv_bucket.bucket_name,
                "REGION": self.region
            },
            queue=validation_job_queue,
            max_scaling_capacity=2,
            max_healthy_percent=200,
            min_healthy_percent=66)
        validation_fargate_service.task_definition.task_role.add_managed_policy(
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonDynamoDBFullAccess"))
        validation_fargate_service.task_definition.task_role.add_managed_policy(
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonS3FullAccess"))

        profiling_fargate_service = _ecs_patterns.QueueProcessingFargateService(
            self,
            "ProfilingFargateService",
            cluster=cluster,
            cpu=4096,
            memory_limit_mib=30720,
            enable_logging=True,
            image=_ecs.ContainerImage.from_docker_image_asset(
                profiling_fargate_asset),
            environment={
                "TABLE_NAME": validation_job_table.table_name,
                "QUEUE_URL": profiling_job_queue.queue_url,
                "SOURCE_BUCKET_NAME": source_csv_bucket.bucket_name,
                "TARGET_BUCKET_NAME": target_csv_bucket.bucket_name,
                "REGION": self.region
            },
            queue=profiling_job_queue,
            max_scaling_capacity=2,
            max_healthy_percent=200,
            min_healthy_percent=66)
        profiling_fargate_service.task_definition.task_role.add_managed_policy(
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonDynamoDBFullAccess"))
        profiling_fargate_service.task_definition.task_role.add_managed_policy(
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonS3FullAccess"))
Example 6
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        ### VPC

        # create a new VPC with two AZs and two NAT gateways
        # TODO - include a VPC SQS endpoint so the NAT gateways aren't needed anymore
        vpc = aws_ec2.Vpc(self,
                          "Vpc",
                          max_azs=2,
                          nat_gateways=2,
                          subnet_configuration=[
                              aws_ec2.SubnetConfiguration(
                                  name="private",
                                  cidr_mask=24,
                                  subnet_type=aws_ec2.SubnetType.PRIVATE),
                              aws_ec2.SubnetConfiguration(
                                  name="public",
                                  cidr_mask=28,
                                  subnet_type=aws_ec2.SubnetType.PUBLIC)
                          ])

        # create a new ECS cluster
        cluster = aws_ecs.Cluster(self, "FargateSQS", vpc=vpc)

        ### SQS

        # create a new SQS queue
        msg_queue = aws_sqs.Queue(self,
                                  "SQSQueue",
                                  visibility_timeout=core.Duration.seconds(0),
                                  retention_period=core.Duration.minutes(30))

        ### FARGATE

        # build the docker image from local "./docker" directory
        sqscontainer = aws_ecs.ContainerImage.from_asset(directory="docker")

        # add the aws-xray-daemon as a sidecar running on UDP/2000
        xraycontainer = aws_ecs.ContainerImage.from_registry(
            "amazon/aws-xray-daemon")

        # create the queue processing service on fargate with a locally built container
        # the pattern automatically adds an environment variable with the queue name for the container to read
        fargate_service = aws_ecs_patterns.QueueProcessingFargateService(
            self,
            "Service",
            cluster=cluster,
            memory_limit_mib=512,
            cpu=256,
            image=sqscontainer,
            enable_logging=True,
            desired_task_count=0,
            max_scaling_capacity=5,
            scaling_steps=[
                {"upper": 0, "change": -5},
                {"lower": 1, "change": +1},
                # disabled metric based scaling to test scaling on cpu usage only
                # this may potentially lower cost as fargate will scale in smaller steps
                # {"lower": 50000, "change": +2},
                # {"lower": 250000, "change": +4}
            ],
            queue=msg_queue,
            environment={"sqs_queue_url": msg_queue.queue_url})

        # add the standard aws xray sidecar to the container task
        xray_sidecar = fargate_service.task_definition.add_container(
            "xraycontainer",
            image=xraycontainer,
            logging=fargate_service.log_driver)

        # expose the sidecar on port UDP/2000
        xray_sidecar.add_port_mappings(
            aws_ecs.PortMapping(container_port=2000,
                                protocol=aws_ecs.Protocol.UDP))

        ### LAMBDA

        # build the go binary for the lambda SQS generator and retrieve the unix timestamp of when the file was modified
        # since CDK cannot natively build Go binaries yet, we need to do this manually through build_lambda_zip.py
        os.system("python loadgen/build_lambda_zip.py")
        filets = str(int(os.path.getctime("./loadgen/lambda.zip")))

        # create a lambda function to generate load, using the filets value as a source hash for the zip
        sqs_lambda = aws_lambda.Function(
            self,
            "GenerateLoadSQS",
            runtime=aws_lambda.Runtime.GO_1_X,
            code=aws_lambda.Code.from_asset("./loadgen/lambda.zip",
                                            source_hash=filets),
            handler="loadgensqs",
            timeout=core.Duration.seconds(20),
            memory_size=128,
            retry_attempts=0,
            tracing=aws_lambda.Tracing.ACTIVE,
            environment={
                "sqs_queue_url": msg_queue.queue_url,
                "total_message_count": "100"
            })

        ### CLOUDWATCH RULE

        # create a new cloudwatch rule running every minute to trigger the lambda function
        event_rule_minute = aws_events.Rule(
            self,
            "lambda-generator-minute-rule",
            enabled=True,
            schedule=aws_events.Schedule.cron(minute="*"))

        event_rule_minute.add_target(
            aws_events_targets.LambdaFunction(sqs_lambda))

        ### IAM

        # add the Lambda IAM permission to send SQS messages
        msg_queue.grant_send_messages(sqs_lambda)

        # add XRay permissions to Fargate task and Lambda
        xray_policy = PolicyStatement(resources=["*"],
                                      actions=[
                                          "xray:GetGroup", "xray:GetGroups",
                                          "xray:GetSampling*", "xray:GetTime*",
                                          "xray:GetService*",
                                          "xray:PutTelemetryRecords",
                                          "xray:PutTraceSegments"
                                      ])

        fargate_service.task_definition.add_to_task_role_policy(xray_policy)
        sqs_lambda.add_to_role_policy(xray_policy)
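Because the service starts at desired_task_count=0 and scales on queue depth, it can also be exercised outside the scheduled Lambda by pushing messages onto the queue directly. A hypothetical helper (pass the deployed queue URL as the first argument):

# Hypothetical manual load generator; the scheduled Go Lambda above does the same job.
import sys

import boto3

queue_url = sys.argv[1]
sqs = boto3.client("sqs")

for i in range(100):
    sqs.send_message(QueueUrl=queue_url, MessageBody=f"load test message {i}")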