Example No. 1
    def __init__(self, scope: core.Construct, id: str, Description: str,
                 Uuid: str, **kwargs) -> None:
        super().__init__(scope, id)

        with open(kwargs["HandlerPath"], encoding="utf-8") as fp:
            code_body = fp.read()

        resource = cfn.CustomResource(
            self,
            "Resource",
            provider=cfn.CustomResourceProvider.lambda_(
                lambda_.SingletonFunction(
                    self,
                    "Singleton",
                    description=Description,
                    uuid=Uuid,
                    code=lambda_.InlineCode(code_body),
                    handler="index.main",
                    timeout=core.Duration.seconds(300),
                    runtime=lambda_.Runtime.PYTHON_3_7,
                    initial_policy=kwargs["ResourcePolicies"],
                    log_retention=logs.RetentionDays.ONE_DAY,
                )),
            properties=kwargs,
        )
        # response
        self.response = resource.get_att("Response")
Example No. 2
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id)

        with open("crd_function/crd.py") as fp:
            code_body = fp.read()

        crd_lambda = lambda_.SingletonFunction(
            self, "Singleton",
            uuid=str(uuid4()),
            code=lambda_.InlineCode(code_body),
            handler="index.lambda_handler",
            timeout=core.Duration.seconds(300),
            runtime=lambda_.Runtime.PYTHON_3_7,
        )
        crd_lambda.add_to_role_policy(
            statement=iam.PolicyStatement(
                actions=["inspector:SubscribeToEvent"],
                resources=["*"]
            )
        )

        resource = cfn.CustomResource(
            self, "Resource",
            provider=cfn.CustomResourceProvider.lambda_(handler=crd_lambda),
            properties=kwargs,
        )

        self.response = resource.get_att("Response").to_string()
Example No. 3
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id)

        table_name = kwargs.get("table_name")
        table_arn = kwargs.get("table_arn")

        function = lambda_.SingletonFunction(
            self,
            "Singleton",
            uuid="22fbda4b-ee9f-4317-9489-c118134d8e97",
            code=lambda_.Code.asset("./lambda/load-ddb-data.zip"),
            handler="lambda_function.handler",
            timeout=core.Duration.seconds(300),
            runtime=lambda_.Runtime.PYTHON_3_7,
            environment={'TABLE_NAME': table_name})

        function.add_to_role_policy(
            iam.PolicyStatement(actions=['dynamodb:PutItem'],
                                resources=["*"],
                                effect=iam.Effect.ALLOW))

        resource = cfn.CustomResource(
            self,
            "Resource",
            provider=cfn.CustomResourceProvider.lambda_(function),
            properties=kwargs,
        )

        self.response = resource.get_att("Response").to_string()
Example No. 4
    def __init__(self, scope: core.Construct, id: str,
                 props: CustomResourceProps, **kwargs) -> None:
        super().__init__(scope, id)

        name = props.name
        lambda_directory = props.lambda_directory
        handler = props.handler
        timeout = props.timeout
        runtime = props.runtime
        environment = props.environment
        resource_properties = props.resource_properties
        lambda_uuid = str(uuid.uuid5(uuid.NAMESPACE_DNS, name + handler))

        resource = cfn.CustomResource(
            self,
            "Resource",
            provider=cfn.CustomResourceProvider.lambda_(
                lambda_.SingletonFunction(
                    self,
                    "Singleton",
                    environment=environment,
                    function_name=name,
                    uuid=lambda_uuid,
                    code=lambda_.AssetCode(lambda_directory),
                    handler=handler,
                    timeout=core.Duration.seconds(timeout),
                    runtime=runtime,
                )),
            properties=resource_properties,
        )
        self.resource = resource
        self.response = resource.get_att("Response").to_string()
Example No. 5
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id)

        es_host = kwargs.get("es_host")
        es_region = kwargs.get("es_region")
        es_domain_arn = kwargs.get("es_domain_arn")

        function = lambda_.SingletonFunction(
            self,
            "Singleton",
            uuid="e43d1f1e-5676-415c-84d5-d376069aa0da",
            code=lambda_.Code.asset("./lambda/load-es-index.zip"),
            handler="lambda_function.handler",
            timeout=core.Duration.seconds(300),
            runtime=lambda_.Runtime.PYTHON_3_7,
            environment={
                'ES_HOST': es_host,
                'ES_REGION': es_region
            })

        function.add_to_role_policy(
            iam.PolicyStatement(actions=['es:ESHttpPost', 'es:ESHttpPut'],
                                resources=[es_domain_arn],
                                effect=iam.Effect.ALLOW))

        resource = cfn.CustomResource(
            self,
            "Resource",
            provider=cfn.CustomResourceProvider.lambda_(function),
            properties=kwargs,
        )

        self.response = resource.get_att("Response").to_string()
Example No. 6
    def setup_iot_endpoint_provider(self):
        describe_endpoint_policy = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=["iot:DescribeEndpoint"],
            resources=["*"],
        )

        provider_lambda = aws_lambda.SingletonFunction(
            self,
            "iot_data_ats_endpoint_provider_lambda",
            uuid="iot_data_ats_endpoint_provider_lambda_20200507150213",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=aws_lambda.Code.asset("custom_resources/iot_endpoint"),
            handler="iot_endpoint_provider.on_event",
            description="Returns iot:Data-ATS endpoint for this account",
            current_version_options=aws_lambda.VersionOptions(
                removal_policy=core.RemovalPolicy.DESTROY),
            initial_policy=[describe_endpoint_policy],
        )

        provider = custom_resources.Provider(self,
                                             "iot_data_ats_endpoint_provider",
                                             on_event_handler=provider_lambda)

        iot_endpoint = core.CustomResource(
            self,
            "iot_data_ats_endpoint",
            resource_type="Custom::IotDataAtsEndpoint",
            service_token=provider.service_token,
        )

        endpoint_address = iot_endpoint.get_att("EndpointAddress").to_string()

        self._parameters_to_save["iot_endpoint_address"] = endpoint_address
Example No. 7
    def __init__(self, scope: cdk.Construct, id: str, **kwargs) -> None:
        super().__init__(
            scope,
            id,
        )

        with open("custom-resource-handler.py", encoding="utf-8") as fp:
            code_body = fp.read()

        resource = cfn.CustomResource(
            self,
            "Resource",
            provider=cfn.CustomResourceProvider.lambda_(
                lambda_.SingletonFunction(
                    self,
                    "Singleton",
                    uuid="f7d4f730-4ee1-11e8-9c2d-fa7ae01bbebc",
                    code=lambda_.InlineCode(code_body),
                    handler="index.main",
                    timeout=cdk.Duration.seconds(300),
                    runtime=lambda_.Runtime.PYTHON_2_7,
                )),
            properties=kwargs,
        )

        self.response = resource.get_att("Response").to_string()
Example No. 8
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id)

        with open("custom_resource/random_string_generator_lambda_function.py", encoding="utf-8") as fp:
            code_body = fp.read()


        # Use `uuidgen` in bash to generate new ones
        random_string_generator_fn = lambda_.SingletonFunction(
            self, "Singleton",
            uuid="RANDOMF2-F7DB-4561-B7AC-4C9730D10E95",
            code=lambda_.InlineCode(code_body),
            handler="index.lambda_handler",
            timeout=core.Duration.seconds(300),
            runtime=lambda_.Runtime.PYTHON_3_7,
        )


        resource = cfn.CustomResource(
            self, "Resource",
            provider=cfn.CustomResourceProvider.lambda_(
                random_string_generator_fn
            ),
            properties=kwargs,
        )

        self.response = resource.get_att("Response").to_string()
Example No. 9
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id)

        # Read Lambda Code:)
        try:
            with open(
                    "waf_stacks/custom_resources/waf_rate_rule_creator/lambda_src/index.py",
                    encoding="utf-8",
                    mode="r") as f:
                waf_rate_rule_creator_fn_code = f.read()
        except OSError:
            print("Unable to read Lambda Function Code")
            raise

        # Create IAM Permission Statements that are required by the Lambda

        role_stmt1 = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            resources=["*"],
            actions=["wafv2:GetWebACL", "wafv2:UpdateWebACL"])
        role_stmt1.sid = "AllowLambdaToCreateWafRules"

        waf_rate_rule_creator_fn = _lambda.SingletonFunction(
            self,
            "waFRateRuleCreatorSingleton",
            uuid="mystique30-4ee1-11e8-9c2d-fa7ae01bbebc",
            code=_lambda.InlineCode(waf_rate_rule_creator_fn_code),
            handler="index.lambda_handler",
            timeout=core.Duration.seconds(10),
            runtime=_lambda.Runtime.PYTHON_3_7,
            reserved_concurrent_executions=1,
            environment={
                "LOG_LEVEL": "INFO",
                "APP_ENV": "Production"
            },
            description="Creates a rate based WAF rule")

        waf_rate_rule_creator_fn.add_to_role_policy(role_stmt1)

        # Create Custom Log group
        waf_rate_rule_creator_fn_lg = _logs.LogGroup(
            self,
            "wafRateRuleCreatorLogGroup",
            log_group_name=
            f"/aws/lambda/{waf_rate_rule_creator_fn.function_name}",
            retention=_logs.RetentionDays.ONE_WEEK,
            removal_policy=core.RemovalPolicy.DESTROY)

        waf_rate_rule_creator = cfn.CustomResource(
            self,
            "wafRateRuleCreatorCustomResource",
            provider=cfn.CustomResourceProvider.lambda_(
                waf_rate_rule_creator_fn),
            properties=kwargs,
        )

        self.response = waf_rate_rule_creator.get_att(
            "rule_add_status").to_string()
Example No. 10
    def __init__(self, scope: core.Construct, id: str, bucket_name: str,
                 uuid: str, **kwargs):
        super().__init__(scope, id, **kwargs)

        bucket_storage = _s3.LifecycleRule(transitions=[
            _s3.Transition(storage_class=_s3.StorageClass.INTELLIGENT_TIERING,
                           transition_after=core.Duration.days(1))
        ])

        self.__bucket = _s3.Bucket(self,
                                   'S3Bucket',
                                   bucket_name=bucket_name,
                                   removal_policy=core.RemovalPolicy.DESTROY,
                                   encryption=_s3.BucketEncryption.KMS_MANAGED,
                                   lifecycle_rules=[bucket_storage])

        with open('common/common_cdk/lambda/empty_bucket.py', 'r') as f:
            lambda_source = f.read()

        empty_bucket_lambda = _lambda.SingletonFunction(
            self,
            'EmptyBucketLambda',
            uuid=uuid,
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.inline(lambda_source),
            handler='index.handler',
            timeout=core.Duration.minutes(15))

        empty_bucket_lambda.role.add_to_policy(
            _iam.PolicyStatement(actions=[
                's3:DeleteObject', 's3:DeleteObjectVersion',
                's3:ListBucketVersions', 's3:ListBucket'
            ],
                                 resources=[
                                     self.__bucket.bucket_arn + '/*',
                                     self.__bucket.bucket_arn
                                 ]))

        empty_bucket_lambda_provider = _custom_resources.Provider(
            self,
            'EmptyBucketLambdaProvider',
            on_event_handler=empty_bucket_lambda)

        custom_resource = core.CustomResource(
            self,
            'EmptyBucketCustomResource',
            service_token=empty_bucket_lambda_provider.service_token,
            properties={"bucket_name": self.__bucket.bucket_name})

        custom_resource.node.add_dependency(self.__bucket)
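
The construct above grants the singleton function delete/list permissions on the bucket and passes bucket_name through the custom resource properties, so the handler in common/common_cdk/lambda/empty_bucket.py presumably drains the bucket on stack deletion. A minimal sketch under that assumption (the real file is not included on this page):

# Hypothetical sketch of common/common_cdk/lambda/empty_bucket.py
import boto3


def handler(event, context):
    bucket_name = event["ResourceProperties"]["bucket_name"]
    if event["RequestType"] == "Delete":
        bucket = boto3.resource("s3").Bucket(bucket_name)
        # Remove every object version and delete marker so the DESTROY
        # removal policy can succeed on a non-empty, versioned bucket.
        bucket.object_versions.delete()
    return {"PhysicalResourceId": "empty-bucket-" + bucket_name}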
Example No. 11
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id)

        with open("custom_resource/iam_user_tagger_lambda_function.py",
                  encoding="utf-8") as fp:
            code_body = fp.read()

        statement = iam.PolicyStatement()
        # https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html
        statement.add_actions("iam:TagUser")
        statement.add_actions("iam:UntagUser")
        statement.add_all_resources()
        statement.effect = iam.Effect.ALLOW

        iam_tagger_fn = lambda_.SingletonFunction(
            self,
            "Singleton",
            uuid="tagger30-4ee1-11e8-9c2d-fa7ae01bbebc",
            code=lambda_.InlineCode(code_body),
            handler="index.lambda_handler",
            timeout=core.Duration.seconds(300),
            runtime=lambda_.Runtime.PYTHON_3_7,
        )

        iam_tagger_fn.add_to_role_policy(statement)
        """ 
        resource = cfn.CustomResource(
            self, "Resource",
            provider=cfn.CustomResourceProvider.lambda_(
                lambda_.SingletonFunction(
                    self, "Singleton",
                    uuid="f7d4f730-4ee1-11e8-9c2d-fa7ae01bbebc",
                    code=lambda_.InlineCode(code_body),
                    handler="index.main",
                    timeout=core.Duration.seconds(300),
                    runtime=lambda_.Runtime.PYTHON_3_7,
                )
            ),
            properties=kwargs,
        )
        """

        resource = cfn.CustomResource(
            self,
            "Resource",
            provider=cfn.CustomResourceProvider.lambda_(iam_tagger_fn),
            properties=kwargs,
        )

        self.response = resource.get_att("Response").to_string()
Example No. 12
    def create_custom_authorizer_signing_key_generic(
            self, unique_id, description, token_value) -> core.CustomResource:
        """
        Uses a Lambda to create an asymmetric key pair, since neither CFn nor CDK support that as of
        this writing (2020-05-09)
        https://github.com/aws-cloudformation/aws-cloudformation-coverage-roadmap/issues/337

        After creating the key, it signs the token value using the private key, and stores all of
        `token_value`, `token_value`'s signature, and the public key in the stack's parameter store.

        :return: the CustomResource for the signing key
        """
        create_authorizer_policy = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "kms:CreateKey", "kms:GetPublicKey", "kms:ScheduleKeyDeletion",
                "kms:Sign"
            ],
            resources=["*"],
        )
        provider_lambda = aws_lambda.SingletonFunction(
            self,
            f"iot_custom_authorizer_key_provider_lambda_{unique_id}",
            uuid=
            f"iot_custom_authorizer_key_provider_lambda_20200507150213_{unique_id}",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=aws_lambda.Code.asset(
                "custom_resources/iot_custom_authorizer_key_provider"),
            handler="iot_custom_authorizer_key_provider.on_event",
            description=description,
            current_version_options=aws_lambda.VersionOptions(
                removal_policy=core.RemovalPolicy.DESTROY),
            initial_policy=[create_authorizer_policy],
        )

        provider = custom_resources.Provider(
            self,
            f"iot_custom_authorizer_key_provider_{unique_id}",
            on_event_handler=provider_lambda,
        )

        iot_custom_authorizer_key = core.CustomResource(
            self,
            f"iot_custom_authorizer_key_{unique_id}",
            resource_type="Custom::IoTCustomAuthorizer",
            service_token=provider.service_token,
            properties={"token_value": token_value},
        )

        return iot_custom_authorizer_key
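
The docstring above describes what the key-provider Lambda does, so a hedged sketch of iot_custom_authorizer_key_provider.on_event is included here for orientation. It is not the repository's actual handler; details such as converting the DER public key to the PEM form IoT expects are omitted, and the attribute names simply mirror the get_att calls used elsewhere on this page.

# Hypothetical sketch of iot_custom_authorizer_key_provider.py
import base64

import boto3

kms = boto3.client("kms")


def on_event(event, context):
    if event["RequestType"] == "Delete":
        # Asymmetric CMKs cannot be deleted immediately; schedule deletion.
        kms.schedule_key_deletion(KeyId=event["PhysicalResourceId"],
                                  PendingWindowInDays=7)
        return {}

    token_value = event["ResourceProperties"]["token_value"]
    key = kms.create_key(KeyUsage="SIGN_VERIFY",
                         CustomerMasterKeySpec="RSA_2048")
    key_id = key["KeyMetadata"]["KeyId"]
    signature = kms.sign(KeyId=key_id,
                         Message=token_value.encode("utf-8"),
                         MessageType="RAW",
                         SigningAlgorithm="RSASSA_PKCS1_V1_5_SHA_256")
    public_key = kms.get_public_key(KeyId=key_id)
    return {
        "PhysicalResourceId": key_id,
        "Data": {
            "custom_authorizer_token_signature":
                base64.b64encode(signature["Signature"]).decode("ascii"),
            "custom_authorizer_public_key":
                base64.b64encode(public_key["PublicKey"]).decode("ascii"),
        },
    }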
Example No. 13
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id)

        print('reading code source...')
        with open("./cdk_blog_vpc/lambda/lambda_function.py",
                  encoding="utf-8") as fp:
            code_body = fp.read()

        my_lambda_role = iam.Role(
            self,
            "Role_lambda",
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"))

        my_lambda_role.add_to_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                resources=["*"],
                actions=[
                    "logs:*", "ec2:DescribeVpcs", "ec2:DescribeInstances",
                    "ec2:DescribeInstanceAttribute", "dynamodb:PutItem",
                    "ec2:DescribeSubnets", "ec2:DescribeVpcPeeringConnections",
                    "ec2:DescribeRouteTables", "ec2:CreateRoute",
                    "ec2:ReplaceRouteTableAssociation", "ec2:CreateRouteTable",
                    "ec2:DisassociateRouteTable", "ec2:AssociateRouteTable",
                    "ec2:DeleteRoute", "ec2:ReplaceRoute",
                    "ec2:DeleteRouteTable"
                ]))

        _uuid = uuid.uuid1()
        resource = cfn.CustomResource(
            self,
            "Resource",
            provider=cfn.CustomResourceProvider.lambda_(
                aws_lambda.SingletonFunction(
                    self,
                    "Singleton",
                    uuid=str(_uuid),
                    code=aws_lambda.Code.from_asset("./cdk_blog_vpc/lambda"),
                    handler="lambda_function.lambda_handler",
                    timeout=core.Duration.seconds(300),
                    runtime=aws_lambda.Runtime.PYTHON_3_7,
                    role=my_lambda_role)),
            properties=kwargs,
        )

        self.response = resource.get_att("Response").to_string()
Example No. 14
    def __init__(self, scope: core.Construct, id: str, config_params, ** kwargs) -> None:
        super().__init__(scope, id)

        # Read LambdaFunction Code
        try:
            with open("load_generator_stacks/custom_resources/trigger_run_task/lambda_src/trigger_run_task_lambda_function.py", encoding="utf-8") as fp:
                code_body = fp.read()
        except OSError:
            print("Unable to read Lambda Function Code")
            raise

        # Create IAM Permission Statements that are required by the Lambda

        trigger_run_task_fn = _lambda.SingletonFunction(
            self, "Singleton",
            uuid="mystique2010-4ee1-11e8-9c2d-fa7ae01bbebc",
            code=_lambda.InlineCode(code_body),
            handler="index.lambda_handler",
            timeout=core.Duration.seconds(300),
            runtime=_lambda.Runtime.PYTHON_3_7,
            environment={
                "RUN_TASK_FN_ARN": config_params.get("RUN_TASK_FN_ARN")
            },
            # security_group=config_params.get('RUN_TASK_FN_ARN'),

        )
        roleStmt1 = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            resources=["*"],
            actions=["lambda:InvokeFunction"]
        )
        roleStmt1.sid = "AllowLambdaToInvokeLambda"
        trigger_run_task_fn.add_to_role_policy(roleStmt1)

        resource = cfn.CustomResource(
            self, "Resource",
            provider=cfn.CustomResourceProvider.lambda_(
                trigger_run_task_fn
            ),
            properties=kwargs,
        )

        self.response = resource.get_att("Response").to_string()
Example No. 15
    def __init__(self, scope: core.Construct, id: str, secgroup_name: str, **kwargs):
        super().__init__(scope, id, **kwargs)

        with open('common/common_cdk/lambda/empty_security_group.py', 'r') as f:
            lambda_source = f.read()

        # lambda utils to empty security group before deletion
        empty_secgroup_lambda = _lambda.SingletonFunction(self, 'EmptySecurityGroupLambda',
                                                          uuid="dfs3k8730-4ee1-11e8-9c2d-fdfs65dfsc",
                                                          runtime=_lambda.Runtime.PYTHON_3_7,
                                                          code=_lambda.Code.inline(lambda_source),
                                                          handler='index.handler',
                                                          function_name='ara-auto-empty-secgroup'
                                                          )

        empty_secgroup_lambda_role = _iam.Role(
            self, 'AutoEmptyBucketLambdaRole',
            assumed_by=_iam.ServicePrincipal('lambda.amazonaws.com')
        )

        empty_secgroup_lambda_role.add_to_policy(
            _iam.PolicyStatement(
                actions=[
                    'ec2:RevokeSecurityGroupIngress',
                    'ec2:RevokeSecurityGroupEgress'
                ],
                resources=['arn:aws:ec2::'+core.Aws.ACCOUNT_ID+':security-group/'+secgroup_name]
            )
        )

        empty_secgroup_lambda_provider = _custom_resources.Provider(
            self, 'EmptyBucketLambdaProvider',
            on_event_handler=empty_secgroup_lambda
        )

        core.CustomResource(
            self, 'EmptyBucketCustomResource',
            service_token=empty_secgroup_lambda_provider.service_token,
            properties={
                "secgroup_name": secgroup_name
            }
        )
Example No. 16
    def __init__(self, scope: core.Construct, id: str, ** kwargs) -> None:
        super().__init__(scope, id)

        # Read Lambda Function Code
        try:
            with open("cognito_identity_provider/custom_resources/cognito_app_client_secret_retriever/lambda_src/index.py",
                      encoding="utf-8",
                      mode="r") as f:
                cognito_app_client_secret_retriever_fn_code = f.read()
        except OSError:
            print("Unable to read Lambda Function Code")
            raise

        # Create IAM Permission Statements that are required by the Lambda

        roleStmt1 = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            resources=["*"],
            actions=["cognito-idp:DescribeUserPoolClient"]
        )
        roleStmt1.sid = "AllowLambdaToDescribeCognitoUserPool"

        roleStmt2 = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            resources=["*"],
            actions=["secretsmanager:CreateSecret",
                     "secretsmanager:TagResource",
                     "secretsmanager:UpdateSecret",
                     "secretsmanager:DeleteSecret"]
        )
        roleStmt2.sid = "AllowLambdaToAddSecrets"

        cognito_app_client_secret_retriever_fn = _lambda.SingletonFunction(
            self,
            "Singleton",
            uuid="mystique30-4ee1-11e8-9c2d-fa7ae01bbebc",
            code=_lambda.InlineCode(
                cognito_app_client_secret_retriever_fn_code),
            handler="index.lambda_handler",
            timeout=core.Duration.seconds(10),
            runtime=_lambda.Runtime.PYTHON_3_7,
            reserved_concurrent_executions=1,
            environment={
                "LOG_LEVEL": "INFO",
                "APP_ENV": "Production"
            }
        )

        cognito_app_client_secret_retriever_fn.add_to_role_policy(roleStmt1)
        cognito_app_client_secret_retriever_fn.add_to_role_policy(roleStmt2)

        # Create Custom Loggroup
        cognito_app_client_secret_retriever_fn_lg = _logs.LogGroup(
            self,
            "cognitoAppClientSecretRetriever",
            log_group_name=f"/aws/lambda/{cognito_app_client_secret_retriever_fn.function_name}",
            retention=_logs.RetentionDays.ONE_WEEK,
            removal_policy=core.RemovalPolicy.DESTROY
        )

        user_pool_secrets_creator = cfn.CustomResource(
            self, "Resource",
            provider=cfn.CustomResourceProvider.lambda_(
                cognito_app_client_secret_retriever_fn
            ),
            properties=kwargs,
        )

        self.response = user_pool_secrets_creator.get_att(
            "user_pool_secrets_arn").to_string()
Example No. 17
    def setup_custom_authorizer_user_pass(self):
        custom_authorizer_name = self.custom_auth_user_pass_default_authorizer_name
        self._parameters_to_save[
            "custom_authorizer_user_pass_name"] = custom_authorizer_name
        token_key_name = "IoTTokenKeyName"
        self._parameters_to_save[
            "custom_authorizer_user_pass_token_key_name"] = token_key_name
        token_value = "allow"
        self._parameters_to_save[
            "custom_authorizer_user_pass_token_value"] = token_value
        self._parameters_to_save[
            "custom_authorizer_user_pass_username"] = self.custom_auth_user_pass_username
        self._parameters_to_save[
            "custom_authorizer_user_pass_password"] = self.custom_auth_user_pass_password

        iot_custom_authorizer_key_resource = self.create_custom_authorizer_signing_key_generic(
            "2",
            "Manages an asymmetric CMK and token signature for iot custom authorizer with username and password.",
            token_value,
        )

        custom_authorizer_token_signature = iot_custom_authorizer_key_resource.get_att(
            "custom_authorizer_token_signature").to_string()
        self._parameters_to_save[
            "custom_authorizer_user_pass_token_signature"] = custom_authorizer_token_signature

        # TODO: remove forcing of us-east-1 when enhanced custom authorizers are available in all regions
        # Force region to 'us-east-1' due to enhanced custom authorizers only available in this region
        authorizer_function_arn = self.setup_custom_authorizer_function(
            "2",
            "custom_resources/iot_custom_authorizer_user_pass_function",
            "iot_custom_authorizer_user_pass.handler",
            "Sample custom authorizer that allows or denies based on username and password",
            {
                "custom_auth_user_pass_username":
                self.custom_auth_user_pass_username,
                "custom_auth_user_pass_password":
                self.custom_auth_user_pass_password
            },
            "us-east-1",
        )
        create_authorizer_policy = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "iot:CreateAuthorizer",
                "iot:UpdateAuthorizer",
                "iot:DeleteAuthorizer",
                "iot:UpdateDomainConfiguration",
                "iot:CreateDomainConfiguration",
                "iot:DescribeDomainConfiguration",
                "iot:DeleteDomainConfiguration",
            ],
            resources=["*"],
        )
        provider_lambda = aws_lambda.SingletonFunction(
            self,
            "iot_custom_authorizer_user_pass_provider_lambda",
            uuid=
            "iot_custom_authorizer_user_pass_provider_lambda_20200727123737",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=aws_lambda.Code.asset(
                "custom_resources/iot_custom_authorizer_user_pass_provider"),
            handler="iot_custom_authorizer_user_pass_provider.on_event",
            description=
            "Sets up an IoT custom authorizer for user password & required domain config due to beta status",
            environment={
                "custom_auth_user_pass_uuid":
                self.custom_auth_user_pass_uuid,
                "custom_auth_user_pass_default_authorizer_name":
                self.custom_auth_user_pass_default_authorizer_name,
                "custom_auth_user_pass_domain_configuration_name":
                self.custom_auth_user_pass_domain_configuration_name
            },
            current_version_options=aws_lambda.VersionOptions(
                removal_policy=core.RemovalPolicy.DESTROY),
            initial_policy=[create_authorizer_policy],
        )

        provider = custom_resources.Provider(
            self,
            "iot_custom_authorizer_user_pass_provider",
            on_event_handler=provider_lambda)

        public_key = iot_custom_authorizer_key_resource.get_att(
            "custom_authorizer_public_key").to_string()

        iot_endpoint = core.CustomResource(
            self,
            "iot_custom_authorizer_user_pass",
            resource_type="Custom::IoTCustomAuthorizer",
            service_token=provider.service_token,
            properties={
                "authorizer_function_arn": authorizer_function_arn,
                "authorizer_name": custom_authorizer_name,
                "public_key": public_key,
                "token_key_name": token_key_name,
            },
        )
        endpoint_address = iot_endpoint.get_att(
            "BetaEndpointAddress").to_string()
        self._parameters_to_save[
            "iot_beta_endpoint_address"] = endpoint_address
Example No. 18
    def setup_custom_authorizer(self):
        # These values are used in the custom authorizer setup, and exported to Parameter Store
        # for use by integration tests
        custom_authorizer_name = "iot_custom_authorizer"
        self._parameters_to_save[
            "custom_authorizer_name"] = custom_authorizer_name

        # Note: "key" is a bit overloaded here. In the context of the custom authorizer, "key name"
        # refers to the HTTP header field that the custom authorizer looks for a token value in.
        #
        # In the case of the custom authorizer key provider, the "key" is the KMS asymmetric CMK
        # used to sign the token value passed in the `token_key_name` header. In order to keep the
        # terminology consistent between client integ tests that are expecting to pass something for
        # a "key name" field, we'll let the ambiguity stand.
        token_key_name = "iot_custom_authorizer_token"
        self._parameters_to_save[
            "custom_authorizer_token_key_name"] = token_key_name

        token_value = "allow"
        self._parameters_to_save["custom_authorizer_token_value"] = token_value

        iot_custom_authorizer_key_resource = self.create_custom_authorizer_signing_key_generic(
            "1",
            "Manages an asymmetric CMK and token signature for iot custom authorizer.",
            token_value,
        )

        custom_authorizer_token_signature = iot_custom_authorizer_key_resource.get_att(
            "custom_authorizer_token_signature").to_string()
        self._parameters_to_save[
            "custom_authorizer_token_signature"] = custom_authorizer_token_signature

        authorizer_function_arn = self.setup_custom_authorizer_function(
            "1",
            "custom_resources/iot_custom_authorizer_function",
            "iot_custom_authorizer.handler",
            "Sample custom authorizer that allows or denies based on 'token' value",
            {},
            self.region,
        )

        create_authorizer_policy = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "iot:CreateAuthorizer", "iot:UpdateAuthorizer",
                "iot:DeleteAuthorizer"
            ],
            resources=["*"],
        )
        provider_lambda = aws_lambda.SingletonFunction(
            self,
            "iot_custom_authorizer_provider_lambda",
            uuid=self.custom_auth_user_pass_uuid,
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=aws_lambda.Code.asset(
                "custom_resources/iot_custom_authorizer_provider"),
            handler="iot_custom_authorizer_provider.on_event",
            description="Sets up an IoT custom authorizer",
            current_version_options=aws_lambda.VersionOptions(
                removal_policy=core.RemovalPolicy.DESTROY),
            initial_policy=[create_authorizer_policy],
        )

        provider = custom_resources.Provider(self,
                                             "iot_custom_authorizer_provider",
                                             on_event_handler=provider_lambda)

        public_key = iot_custom_authorizer_key_resource.get_att(
            "custom_authorizer_public_key").to_string()

        core.CustomResource(
            self,
            "iot_custom_authorizer",
            resource_type="Custom::IoTCustomAuthorizer",
            service_token=provider.service_token,
            properties={
                "authorizer_function_arn": authorizer_function_arn,
                "authorizer_name": custom_authorizer_name,
                "public_key": public_key,
                "token_key_name": token_key_name,
            },
        )
Example No. 19
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create bucket and upload scripts
        bucket = s3.Bucket(self, "ScriptBucket")

        self.script_bucket = bucket

        s3deploy.BucketDeployment(self, "UploadScriptsToBucket",
            sources=[s3deploy.Source.asset(os.path.join(dirname, "scripts"))],
            destination_bucket=bucket
        )

        # Greengrass Core Thing policy
        greengrass_core_policy = iot.CfnPolicy(self,
            'GreenGrassCorePolicy',
            policy_name='greengrass-demo-policy',
            policy_document={
                "Version": "2012-10-17",
                "Statement": [
                    {
                        "Effect": "Allow",
                        "Action": [
                            "iot:Publish",
                            "iot:Subscribe",
                            "iot:Connect",
                            "iot:Receive"
                        ],
                        "Resource": [
                            "*"
                        ]
                    },
                    {
                        "Effect": "Allow",
                        "Action": [
                            "iot:GetThingShadow",
                            "iot:UpdateThingShadow",
                            "iot:DeleteThingShadow"
                        ],
                        "Resource": [
                            "*"
                        ]
                    },
                    {
                        "Effect": "Allow",
                        "Action": [
                            "greengrass:*"
                        ],
                        "Resource": [
                            "*"
                        ]
                    }
                ]
            }
        )

        self.core_policy_name = greengrass_core_policy.policy_name

        # Create a Greengrass group role
        greengrass_group_role = iam.Role(self, "GroupRole",
            assumed_by=iam.ServicePrincipal("greengrass.amazonaws.com")
        )
        greengrass_group_role.add_to_policy(iam.PolicyStatement(
            resources=["arn:aws:logs:*:*:*"],
            actions=[
                "logs:CreateLogGroup",
                "logs:CreateLogStream",
                "logs:PutLogEvents"
            ]
        ))
        greengrass_group_role.add_to_policy(iam.PolicyStatement(
            resources=["*"],
            actions=["iot:*"]
        ))
     
        self.greengrass_group_role_arn = greengrass_group_role.role_arn
        
        # A custom resource to verify that there is a service role for greengrass on the account 
        greengrass_mgmt_function = awslambda.SingletonFunction(
            self,
            "MgmttHandler",
            uuid="58854ea2-0624-4ca5-b600-fa88d4b9164e",
            runtime=awslambda.Runtime.PYTHON_3_7,
            code=awslambda.Code.asset("custom_resources"),
            handler="greengrassmgmt.handler",
        )

        greengrass_mgmt_function.add_to_role_policy(
            iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        'greengrass:*',
                        'iot:*',
                        'iam:CreateRole',
                        'iam:AttachRolePolicy',
                        'iam:PassRole'
                    ],
                    resources=['*']
                )
        )

        greengrass_mgmt_provider = cust_resource.Provider(self, "MgmtProvider",
            on_event_handler=greengrass_mgmt_function
        )

        core.CustomResource(self, "MgmtCustResource", 
            service_token=greengrass_mgmt_provider.service_token
        )
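
The MgmtCustResource above only exists to make sure the account has a Greengrass service role, which is why the singleton function is granted greengrass:*, iam:CreateRole, iam:AttachRolePolicy and iam:PassRole. A rough sketch of what custom_resources/greengrassmgmt.py might do is shown below; the role name and the look-up-if-missing details are assumptions, not the repository's actual logic.

# Hypothetical sketch of custom_resources/greengrassmgmt.py
import boto3
from botocore.exceptions import ClientError

greengrass = boto3.client("greengrass")
iam = boto3.client("iam")


def handler(event, context):
    if event["RequestType"] in ("Create", "Update"):
        try:
            greengrass.get_service_role_for_account()
        except ClientError:
            # No service role associated yet: look up (or create) a role that
            # trusts greengrass.amazonaws.com and associate it with the account.
            role = iam.get_role(RoleName="Greengrass_ServiceRole")  # assumed name
            greengrass.associate_service_role_to_account(RoleArn=role["Role"]["Arn"])
    return {"PhysicalResourceId": "greengrass-service-role-check"}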
Example No. 20
    def __init__(
            self,
            scope: core.Construct,
            id: str,
            ssh_key_name="mystique-automation-ssh-key",
            ** kwargs
    ) -> None:
        super().__init__(scope, id)

        # Read Lambda Code:)
        try:
            with open("custom_resources/ssh_key_generator/lambda_src/index.py",
                      encoding="utf-8",
                      mode="r"
                      ) as f:
                ssh_key_generator_fn_code = f.read()
        except OSError:
            print("Unable to read Lambda Function Code")
            raise

        # Create IAM Permission Statements that are required by the Lambda

        role_stmt1 = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            resources=["*"],
            actions=[
                "ec2:CreateKeyPair",
                "ec2:DeleteKeyPair"
            ]
        )
        role_stmt1.sid = "AllowLambdaToCreateSshKey"
        role_stmt2 = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            resources=["*"],
            actions=[
                "ssm:PutParameter",
                "ssm:DeleteParameter",
                "ssm:GetParameter"
            ]
        )
        role_stmt2.sid = "AllowLambdaToCreateSSMParameter"

        ssh_key_generator_fn = _lambda.SingletonFunction(
            self,
            "sshKeyGeneratorSingleton",
            uuid="mystique30-4ee1-11e8-9c2d-fa7ae01bbebc",
            code=_lambda.InlineCode(
                ssh_key_generator_fn_code),
            handler="index.lambda_handler",
            timeout=core.Duration.seconds(10),
            runtime=_lambda.Runtime.PYTHON_3_7,
            reserved_concurrent_executions=1,
            environment={
                "LOG_LEVEL": "INFO",
                "APP_ENV": "Production",
                "SSH_KEY_NAME": ssh_key_name
            },
            description="Creates a SSH Key in the region"
        )

        ssh_key_generator_fn.add_to_role_policy(role_stmt1)
        ssh_key_generator_fn.add_to_role_policy(role_stmt2)

        # Create Custom Log group
        ssh_key_generator_fn_lg = _logs.LogGroup(
            self,
            "sshKeyGeneratorLogGroup",
            log_group_name=f"/aws/lambda/{ssh_key_generator_fn.function_name}",
            retention=_logs.RetentionDays.ONE_WEEK,
            removal_policy=core.RemovalPolicy.DESTROY
        )

        ssh_key_generator = cfn.CustomResource(
            self,
            "sshKeyGeneratorCustomResource",
            provider=cfn.CustomResourceProvider.lambda_(
                ssh_key_generator_fn
            ),
            properties=kwargs,
        )

        self.response = ssh_key_generator.get_att(
            "ssh_key_gen_status").to_string()
Example No. 21
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Add the VPC connection arn as an input parameter
        vpc_conn_arn = core.CfnParameter(
            self,
            "VpcConnectionArn",
            type="String",
            description="The Arn of the VPC connection to use for Redshift.")

        quicksight_group_arn = core.Fn.import_value('ara-QuickSight-Group-Arn')
        secret_arn = core.Fn.import_value('ara-QuickSight-Redshift-Secret-Arn')

        # Create the custom resource policy with the necessary permissions
        iam_policy = cr.AwsCustomResourcePolicy.from_statements([
            iam.PolicyStatement(actions=cfg.CDK_POLICY_ACTIONS,
                                resources=['*']),
        ])

        redshift_datasource_lambda = lambda_.SingletonFunction(
            self,
            id='RedshiftDatasourceLambda',
            uuid='b438edeb-f5dc-486a-ac2d-bc0918b975b8',
            runtime=lambda_.Runtime.PYTHON_3_8,
            code=lambda_.Code.from_asset('dataviz_redshift/lambda'),
            handler='redshift_datasource.handler',
            function_name='ara_redshift_datasource')

        redshift_datasource_lambda.role.add_to_policy(
            iam.PolicyStatement(actions=['secretsmanager:GetSecretValue'],
                                resources=[secret_arn]))

        redshift_datasource_lambda.role.add_to_policy(
            iam.PolicyStatement(actions=[
                'quicksight:CreateDataSource', 'quicksight:DeleteDataSource'
            ],
                                resources=['*']))

        lambda_provider = cr.Provider(
            self,
            id='LambdaProvider',
            on_event_handler=redshift_datasource_lambda)

        responseLamb = core.CustomResource(
            self,
            'RedshiftDatasourceResource',
            service_token=lambda_provider.service_token,
            properties={
                'Secret_arn': secret_arn,
                'Datasource_name': cfg.REDSHIFT_DATASOURCE_NAME,
                'Aws_account_id': self.account,
                'Quicksight_group_arn': quicksight_group_arn,
                'Datasource_actions': cfg.DATASOURCE_ACTIONS,
                'Vpc_conn_arn': vpc_conn_arn.value_as_string
            })

        redshift_datasource_arn = responseLamb.get_att_string('datasource_arn')

        core.CfnOutput(self,
                       "RedshiftDataSourceArn",
                       description="Redshift Data Source Arn",
                       value=redshift_datasource_arn)

        # Create a Redshift dataset with custom SQL
        redshift_dataset_arn = QuickSightRedshiftDataset(
            self,
            'RedshiftDataset',
            iam_policy=iam_policy,
            quicksight_group_arn=quicksight_group_arn,
            redshift_datasource_arn=redshift_datasource_arn,
            redshift_dataset_name=cfg.REDSHIFT_DATASET_NAME,
            dataset_actions=cfg.DATASET_ACTIONS,
            redshift_custom_sql=cfg.REDSHIFT_CUSTOM_SQL,
            redshift_columns=cfg.REDSHIFT_COLUMNS,
            redshift_data_transformations=cfg.REDSHIFT_DATA_TRANSFORMATIONS
        ).redshift_dataset_arn

        QuickSightRedshiftAnalysis(
            self,
            'RedshiftAnalysis',
            iam_policy=iam_policy,
            quicksight_group_arn=quicksight_group_arn,
            redshift_dataset_arn=redshift_dataset_arn,
            redshift_analysis_name=cfg.REDSHIFT_ANALYSIS_NAME,
            redshift_analysis_template_alias=cfg.
            REDSHIFT_ANALYSIS_TEMPLATE_ALIAS,
            analysis_actions=cfg.ANALYSIS_ACTIONS)
Example No. 22
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Parameters
        parameters = core.CfnParameter(
            self, "SourceBucket",
            description="Building on AWS Cognito Stack Modified https://github.com/rosberglinhares/CloudFormationCognitoCustomResources",
            default="default"
        )

        LogoutURL = core.CfnParameter(
            self, "LogoutURL",
            type="String",
            default="http://localhost"
        )

        CallbackURL = core.CfnParameter(
            self, "CallbackURL",
            type="String",
            default="http://localhost/callback"
        )

        AppDomain = core.CfnParameter(
            self, "AppDomain",
            type="String",
            default="default"
        )
        
        # CognitoSNSPolicy
        CognitoSNSPolicy = iam.CfnManagedPolicy(
            self, 'CognitoSNSPolicy',
            description='Managed policy to allow Amazon Cognito to access SNS',
            policy_document={
                "Version": "2012-10-17",
                "Statement": {
                    "Effect": "Allow",
                    "Action": ["sns:publish"],
                    "Resource": "*"
                }
            })

        # SNSRole
        SNSRole = iam.CfnRole(
            self, "SNSRole",
            role_name="SNSRole",
            managed_policy_arns=[CognitoSNSPolicy.ref],
            assume_role_policy_document={
                "Version": "2012-10-17",
                "Statement": [
                    {
                        "Effect": "Allow",
                        "Action": ["sts:AssumeRole"],
                        "Principal": {"Service": ["cognito-idp.amazonaws.com"]}
                    }]
            }
        )
        SNSRole.add_depends_on(CognitoSNSPolicy)

        # CognitoUserPool
        CognitoUserPool = cognito.CfnUserPool(
            self, 'UserPool',
            user_pool_name='photos-pool',
            alias_attributes=[
                "email", "phone_number"],
            auto_verified_attributes=[
                "email"],
            email_verification_message="Hi, Your verification code is <br/>{####}\n",
            email_verification_subject="EDX Email Verification",
            mfa_configuration="OPTIONAL",
            policies={
                "passwordPolicy": {
                    "minimumLength": 8,
                    "requireLowercase": True,
                    "requireNumbers": True,
                    "requireSymbols": True,
                    "requireUppercase": True
                }
            },
            schema=[{
                "attributeDataType": "String",
                "mutable": False,
                "name": "nickname",
                "required": True
            },
                {
                "attributeDataType": "String",
                "mutable": False,
                "name": "email",
                "required": True
            },
                {
                "attributeDataType": "String",
                "mutable": False,
                "name": "phone_number",
                "required": True
            }],
            sms_configuration={
                "externalId": "%s-external" % (core.Aws.STACK_NAME),
                "snsCallerArn": SNSRole.attr_arn
            }
        )

        # CognitoUserPoolClient
        CognitoUserPoolClient = cognito.CfnUserPoolClient(
            self, "UserPoolClient",
            client_name="WebsiteClient",
            generate_secret=True,
            user_pool_id=CognitoUserPool.ref
        )

        # CognitoCustomResourceRole
        CustomResourceRole = iam.CfnRole(
            self, "CustomResourceRole",
            role_name="cognito-resource-role",
            assume_role_policy_document={
                "Version": "2012-10-17",
                "Statement": [
                    {
                        "Effect": "Allow",
                        "Principal": {
                            "Service": [
                                "lambda.amazonaws.com"
                            ]
                        },
                        "Action": [
                            "sts:AssumeRole"
                        ]
                    }
                ]
            },
            policies=[
                {
                    "policyName": "writeCloudWatchLogs",
                    "policyDocument": {
                        "Version": "2012-10-17",
                        "Statement": [
                            {
                                "Effect": "Allow",
                                "Action": "logs:CreateLogGroup",
                                "Resource": "arn:aws:logs:*:*:*"
                            },
                            {
                                "Effect": "Allow",
                                "Action": "logs:CreateLogStream",
                                "Resource": "arn:aws:logs:*:*:*"
                            },
                            {
                                "Effect": "Allow",
                                "Action": "logs:PutLogEvents",
                                "Resource": "arn:aws:logs:*:*:*"
                            }
                        ]
                    }
                },
                {
                    "policyName": "updateUserPoolClient",
                    "policyDocument": {
                        "Version": "2012-10-17",
                        "Statement": [
                            {
                                "Effect": "Allow",
                                "Action": "cognito-idp:UpdateUserPoolClient",
                                "Resource": "arn:aws:cognito-idp:*:*:userpool/*"
                            }
                        ]
                    }
                },
                {
                    "policyName": "manageUserPoolDomain",
                    "policyDocument": {
                        "Version": "2012-10-17",
                        "Statement": [
                            {
                                "Effect": "Allow",
                                "Action": "cognito-idp:CreateUserPoolDomain",
                                "Resource": "arn:aws:cognito-idp:*:*:userpool/*"
                            },
                            {
                                "Effect": "Allow",
                                "Action": "cognito-idp:DeleteUserPoolDomain",
                                "Resource": "arn:aws:cognito-idp:*:*:userpool/*"
                            },
                            {
                                "Effect": "Allow",
                                "Action": "cognito-idp:DescribeUserPoolDomain",
                                "Resource": "*"
                            },
                            {
                                "Effect": "Allow",
                                "Action": "cognito-idp:DescribeUserPoolClient",
                                "Resource": "*"
                            }
                        ]
                    }
                },
                {
                    "policyName": "invokeLambdaFunction",
                    "policyDocument": {
                        "Version": "2012-10-17",
                        "Statement": [
                            {
                                "Effect": "Allow",
                                "Action": "lambda:InvokeFunction",
                                "Resource": "arn:aws:lambda:*:*:function:*"
                            }
                        ]
                    }
                },
            ]
        )

        # CognitoUserPoolClientClientSettings
        with open("./cdk/CognitoUserPoolClientClientSettings/index.js", encoding="utf-8") as fp:
            code_body = fp.read()

        CognitoUserPoolClientClientSettings = cfn.CustomResource(
            self, "CognitoUserPoolClientClientSettings",
            provider=cfn.CustomResourceProvider.lambda_(
                lambda_.SingletonFunction(
                    self, "CognitoUserPoolClientClientSettingsLambda",
                    uuid="f7d4f730-4ee1-11e8-9c2d-fa7ae01bbebc",
                    code=lambda_.InlineCode(code_body),
                    handler="index.handler",
                    runtime=lambda_.Runtime.NODEJS_8_10,
                    role=iam.Role.from_role_arn(
                        self, 'CustomResourceRoleiam', role_arn=CustomResourceRole.attr_arn)
                )
            ),
            properties={"UserPoolId": CognitoUserPool.ref,
                        "UserPoolClientId": CognitoUserPoolClient.ref,
                        "AppDomain": AppDomain.value_as_string,
                        "SupportedIdentityProviders": ['COGNITO'],
                        "CallbackURL": CallbackURL.value_as_string,
                        "LogoutURL": LogoutURL.value_as_string,
                        "AllowedOAuthFlowsUserPoolClient": True,
                        "AllowedOAuthFlows": ['code'],
                        "AllowedOAuthScopes": ['openid']
                        },
        )

        # CognitoIdPool
        CognitoIdPool = cognito.CfnIdentityPool(
            self, 'CognitoIdPool',
            identity_pool_name='edxcognitoidpool',
            cognito_identity_providers=[{
                "clientId": CognitoUserPoolClient.ref,
                "providerName": CognitoUserPool.attr_provider_name
            }],
            allow_unauthenticated_identities=False
        )

        # Output
        core.CfnOutput(self, "CognitoUserPoolIdOutput",
                       value=CognitoUserPool.ref,
                       description="The Pool ID of the Cognito User Pool",
                       export_name="CognitoUserPoolId"
                       )
        core.CfnOutput(self, "CognitoUserPoolProviderURLOutput",
                       value=CognitoUserPool.attr_provider_url,
                       description="The Pool ProviderURL of the Cognito User Pool",
                       export_name="CognitoUserPoolProviderURL"
                       )
        core.CfnOutput(self, "CognitoUserPoolArnOutput",
                       value=CognitoUserPool.attr_arn,
                       description="The Pool Arn of the Cognito User Pool",
                       export_name="CognitoUserPoolArn"
                       )
        core.CfnOutput(self, "CognitoUserPoolClientIdOutput",
                       value=CognitoUserPoolClient.ref,
                       description="The App Client ID ",
                       export_name="CognitoUserPoolClientId"
                       )
        core.CfnOutput(self, "ClientSecretOutput",
                       value=core.Fn.get_att(
                           "CognitoUserPoolClientClientSettings", "ClientSecret").to_string(),
                       description="The Client Secret ",
                       export_name="ClientSecret"
                       )
Example No. 23
    def __init__(self, scope: core.Construct, id: str, cluster: eks.Cluster,
                 kafka: msk.CfnCluster, vpc: ec2.Vpc, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        pip.main([
            "install", "--system", "--target", "custom_resources/kafka/lib",
            "kafka-python"
        ])
        arn = cr.AwsCustomResource(
            self,
            'clusterArn',
            policy=cr.AwsCustomResourcePolicy.from_sdk_calls(resources=['*']),
            on_create=cr.AwsSdkCall(
                action='listClusters',
                service='Kafka',
                physical_resource_id=cr.PhysicalResourceId.of(
                    "ClusterNameFilter"),
                parameters={
                    "ClusterNameFilter": kafka.cluster_name,
                    "MaxResults": 1
                },
            ),
        )

        bootstraps = cr.AwsCustomResource(
            self,
            'clusterBootstraps',
            policy=cr.AwsCustomResourcePolicy.from_sdk_calls(resources=["*"]),
            on_create=cr.AwsSdkCall(
                action='getBootstrapBrokers',
                service='Kafka',
                physical_resource_id=cr.PhysicalResourceId.of("ClusterArn"),
                parameters={
                    "ClusterArn":
                    arn.get_response_field("ClusterInfoList.0.ClusterArn")
                },
            ),
        )

        manifests = []
        for namespace in self.node.try_get_context("kubernetes")['namespaces']:
            manifests.append({
                "apiVersion": "v1",
                "kind": "ConfigMap",
                "metadata": {
                    "name": "kafka",
                    "namespace": namespace
                },
                "data": {
                    "bootstrap":
                    bootstraps.get_response_field('BootstrapBrokerStringTls'),
                }
            })
        eks.KubernetesManifest(self,
                               "kafka-config",
                               cluster=cluster,
                               manifest=manifests)

        function = lbd.SingletonFunction(
            self,
            "KafkaConfigFunction",
            uuid="b09329a3-5206-46f7-822f-337da714aeac",
            code=lbd.Code.from_asset("custom_resources/kafka/"),
            handler="config.handler",
            runtime=lbd.Runtime.PYTHON_3_7,
            function_name="kafkaConfig",
            log_retention=logs.RetentionDays.ONE_DAY,
            security_group=ec2.SecurityGroup.from_security_group_id(
                self, "lambdaKafkaVPC", vpc.vpc_default_security_group),
            timeout=core.Duration.seconds(30),
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(one_per_az=True))

        provider = cr.Provider(self,
                               "KafkaConfigProvider",
                               on_event_handler=function,
                               log_retention=logs.RetentionDays.ONE_DAY)

        core.CustomResource(
            self,
            "KafkaLoadTopic",
            service_token=provider.service_token,
            properties={
                "bootstrap":
                bootstraps.get_response_field('BootstrapBrokerStringTls'),
                "topic":
                "load",
                "partitions":
                150,
                "replicas":
                1
            })

        core.CustomResource(
            self,
            "KafkaGenerateTopic",
            service_token=provider.service_token,
            properties={
                "bootstrap":
                bootstraps.get_response_field('BootstrapBrokerStringTls'),
                "topic":
                "generate",
                "partitions":
                200,
                "replicas":
                1
            })
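
# Hedged sketch (not part of the original example): custom_resources/kafka/config.py,
# loaded by the SingletonFunction above, is not included in this listing. Because the
# function sits behind cr.Provider, the handler only needs to return a dict. Assuming
# the kafka-python package that pip bundles into custom_resources/kafka/lib, a minimal
# topic-management handler might look like this:
import os
import sys

# make the dependencies installed under lib/ importable from the Lambda package
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "lib"))

from kafka.admin import KafkaAdminClient, NewTopic  # noqa: E402


def handler(event, context):
    props = event["ResourceProperties"]
    admin = KafkaAdminClient(bootstrap_servers=props["bootstrap"],
                             security_protocol="SSL")  # TLS brokers are passed above

    if event["RequestType"] == "Create":
        admin.create_topics([
            NewTopic(name=props["topic"],
                     num_partitions=int(props["partitions"]),
                     replication_factor=int(props["replicas"]))
        ])
    elif event["RequestType"] == "Delete":
        admin.delete_topics([props["topic"]])

    # the Provider framework signals CloudFormation; returning an ID is enough
    return {"PhysicalResourceId": "kafka-topic-" + props["topic"]}
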
Example No. 24
    def __init__(self, scope: core.Construct, id: str, log_bucket: _s3.Bucket,
                 config_table: _dynamodb.Table, tshirt_size: str,
                 sink_bucket: _s3.Bucket, web_sale_stream: str,
                 web_customer_stream: str, web_customer_address_stream: str,
                 kinesis_key: _kms.Key, vpc: _ec2.Vpc, **kwargs) -> None:

        super().__init__(scope, id, **kwargs)

        stack = core.Stack.of(self)

        stream_source_bucket = AutoEmptyBucket(
            self,
            'StreamSource',
            bucket_name='ara-stream-source-' + core.Aws.ACCOUNT_ID,
            uuid='95505f50-0276-11eb-adc1-0242ac120002')

        service_role = _iam.Role(
            self,
            'StreamEmrServiceRole',
            assumed_by=_iam.ServicePrincipal('elasticmapreduce.amazonaws.com'))

        service_role.add_managed_policy(
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AmazonElasticMapReduceRole'))

        cluster_role = _iam.Role(
            self,
            'StreamEmrClusterRole',
            assumed_by=_iam.ServicePrincipal("ec2.amazonaws.com"))

        _iam.Policy(
            self,
            'StreamEmrClusterPolicy',
            statements=[
                _iam.PolicyStatement(actions=[
                    "glue:CreateDatabase",
                    "glue:UpdateDatabase",
                    "glue:DeleteDatabase",
                    "glue:GetDatabase",
                    "glue:GetDatabases",
                    "glue:CreateTable",
                    "glue:UpdateTable",
                    "glue:DeleteTable",
                    "glue:GetTable",
                    "glue:GetTables",
                    "glue:GetTableVersions",
                    "glue:CreatePartition",
                    "glue:BatchCreatePartition",
                    "glue:UpdatePartition",
                    "glue:DeletePartition",
                    "glue:BatchDeletePartition",
                    "glue:GetPartition",
                    "glue:GetPartitions",
                    "glue:BatchGetPartition",
                    "glue:CreateUserDefinedFunction",
                    "glue:UpdateUserDefinedFunction",
                    "glue:DeleteUserDefinedFunction",
                    "glue:GetUserDefinedFunction",
                    "glue:GetUserDefinedFunctions",
                    "cloudwatch:PutMetricData",
                    "dynamodb:ListTables",
                    "s3:HeadBucket",
                    "ec2:Describe*",
                ],
                                     resources=['*']),
                _iam.PolicyStatement(
                    actions=['s3:GetObject'],
                    resources=[
                        'arn:aws:s3:::' + ARA_BUCKET_NAME + BINARIES +
                        DataGenConfig.DSDGEN_INSTALL_SCRIPT, 'arn:aws:s3:::' +
                        ARA_BUCKET_NAME + BINARIES + DataGenConfig.JAR_FILE
                    ]),
                _iam.PolicyStatement(
                    actions=['s3:PutObject'],
                    resources=[log_bucket.bucket_arn + "/data-generator/*"]),
                _iam.PolicyStatement(
                    actions=[
                        "s3:AbortMultipartUpload", "s3:CreateBucket",
                        "s3:DeleteObject", "s3:GetBucketVersioning",
                        "s3:GetObject", "s3:GetObjectTagging",
                        "s3:GetObjectVersion", "s3:ListBucket",
                        "s3:ListBucketMultipartUploads",
                        "s3:ListBucketVersions", "s3:ListMultipartUploadParts",
                        "s3:PutBucketVersioning", "s3:PutObject",
                        "s3:PutObjectTagging"
                    ],
                    resources=[
                        sink_bucket.bucket_arn + '/*', sink_bucket.bucket_arn,
                        stream_source_bucket.bucket.bucket_arn + '/*',
                        stream_source_bucket.bucket.bucket_arn
                    ])
            ],
            roles=[cluster_role])

        cluster_role.add_managed_policy(
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                'AmazonSSMManagedInstanceCore'))

        _iam.CfnInstanceProfile(self,
                                'StreamEmrClusterInstanceProfile',
                                roles=[cluster_role.role_name],
                                instance_profile_name=cluster_role.role_name)

        # Security Groups for the EMR cluster (private subnet)
        # https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-man-sec-groups.html#emr-sg-elasticmapreduce-master-private
        master_sg = _ec2.SecurityGroup(self,
                                       'ElasticMapReduce-Master-Private',
                                       vpc=vpc)
        slave_sg = _ec2.SecurityGroup(self,
                                      'ElasticMapReduce-Slave-Private',
                                      vpc=vpc)
        service_sg = _ec2.SecurityGroup(self,
                                        'ElasticMapReduce-ServiceAccess',
                                        vpc=vpc,
                                        allow_all_outbound=False)

        # Service SG used by the proxy instance
        service_sg.add_ingress_rule(master_sg, _ec2.Port.tcp(9443))
        service_sg.add_egress_rule(master_sg, _ec2.Port.tcp(8443))
        service_sg.add_egress_rule(slave_sg, _ec2.Port.tcp(8443))

        # EMR Master
        master_sg.add_ingress_rule(master_sg, _ec2.Port.all_icmp())
        master_sg.add_ingress_rule(master_sg, _ec2.Port.all_tcp())
        master_sg.add_ingress_rule(master_sg, _ec2.Port.all_udp())
        master_sg.add_ingress_rule(slave_sg, _ec2.Port.all_icmp())
        master_sg.add_ingress_rule(slave_sg, _ec2.Port.all_tcp())
        master_sg.add_ingress_rule(slave_sg, _ec2.Port.all_udp())
        master_sg.add_ingress_rule(service_sg, _ec2.Port.tcp(8443))

        # EMR Slave
        slave_sg.add_ingress_rule(master_sg, _ec2.Port.all_icmp())
        slave_sg.add_ingress_rule(master_sg, _ec2.Port.all_tcp())
        slave_sg.add_ingress_rule(master_sg, _ec2.Port.all_udp())
        slave_sg.add_ingress_rule(slave_sg, _ec2.Port.all_icmp())
        slave_sg.add_ingress_rule(slave_sg, _ec2.Port.all_tcp())
        slave_sg.add_ingress_rule(slave_sg, _ec2.Port.all_udp())
        slave_sg.add_ingress_rule(service_sg, _ec2.Port.tcp(8443))

        with open('common/common_cdk/lambda/datagen_config.py', 'r') as f:
            lambda_source = f.read()

        configure_datagen_function = _lambda.SingletonFunction(
            self,
            'StreamConfigureDatagenLambda',
            uuid="a9904dec-01cf-11eb-adc1-0242ac120002",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.inline(lambda_source),
            handler='index.handler',
            function_name='stream-datagen-config',
            environment={
                'TABLE_NAME': config_table.table_name,
                'JAR_LOCATION': BINARIES_LOCATION + DataGenConfig.JAR_FILE,
            },
            timeout=core.Duration.seconds(10))

        configure_datagen_function.role.add_to_policy(
            _iam.PolicyStatement(actions=[
                'dynamodb:GetItem',
                'dynamodb:PutItem',
            ],
                                 resources=[config_table.table_arn]))

        emr_cluster = _emr.CfnCluster(
            self,
            'StreamEmrCluster',
            name="StreamDatagenCluster",
            job_flow_role=cluster_role.role_name,
            service_role=service_role.role_name,
            release_label='emr-5.30.1',
            visible_to_all_users=True,
            log_uri=log_bucket.s3_url_for_object() + "/data-generator",
            applications=[
                _emr.CfnCluster.ApplicationProperty(name='hadoop'),
                _emr.CfnCluster.ApplicationProperty(name='spark')
            ],
            bootstrap_actions=[
                _emr.CfnCluster.BootstrapActionConfigProperty(
                    name="dsdgen-install",
                    script_bootstrap_action=_emr.CfnCluster.
                    ScriptBootstrapActionConfigProperty(
                        path=BINARIES_LOCATION +
                        DataGenConfig.DSDGEN_INSTALL_SCRIPT))
            ],
            instances=_emr.CfnCluster.JobFlowInstancesConfigProperty(
                emr_managed_master_security_group=master_sg.security_group_id,
                emr_managed_slave_security_group=slave_sg.security_group_id,
                service_access_security_group=service_sg.security_group_id,
                ec2_subnet_id=vpc.private_subnets[0].subnet_id,
                core_instance_group=_emr.CfnCluster.
                InstanceGroupConfigProperty(instance_count=DataGenConfig.
                                            BATCH_CLUSTER_SIZE[tshirt_size],
                                            instance_type='m5.xlarge'),
                master_instance_group=_emr.CfnCluster.
                InstanceGroupConfigProperty(instance_count=1,
                                            instance_type='m4.large')))

        configure_datagen = _sfn_tasks.LambdaInvoke(
            self,
            "ConfigureDatagenTask",
            lambda_function=configure_datagen_function,
            payload=_sfn.TaskInput.from_text(
                '{'
                '"Param": "stream_iterator",'
                '"Module": "stream",'
                '"SinkBucket": "' + sink_bucket.s3_url_for_object() + '",'
                '"Parallelism": "' +
                str(int(DataGenConfig.STREAM_DATA_SIZE[tshirt_size]) * 2) +
                '",'
                '"DataSize": "' + DataGenConfig.STREAM_DATA_SIZE[tshirt_size] +
                '",'
                '"TmpBucket": "' +
                str(stream_source_bucket.bucket.s3_url_for_object()) + '"'
                '}'),
            result_path='$.Config')

        add_datagen_step = _sfn.CustomState(
            self,
            'StreamAddDataGenStep',
            state_json={
                "Type": "Task",
                "Resource": "arn:aws:states:::elasticmapreduce:addStep.sync",
                "Parameters": {
                    "ClusterId.$": "$.Emr.Cluster.Id",
                    "Step": {
                        "Name": "DatagenStep",
                        "ActionOnFailure": "CONTINUE",
                        "HadoopJarStep": {
                            "Jar": "command-runner.jar",
                            "Args.$": "$.Config.Payload.StepParam"
                        }
                    }
                },
                "ResultPath": "$.Step",
                "Next": "StreamUpdateIterator"
            })

        update_iterator = _sfn_tasks.DynamoUpdateItem(
            self,
            'StreamUpdateIterator',
            table=config_table,
            key={
                'param':
                _sfn_tasks.DynamoAttributeValue.from_string('stream_iterator')
            },
            update_expression=
            'SET iterator = if_not_exists(iterator, :start) + :inc',
            expression_attribute_values={
                ":inc": _sfn_tasks.DynamoAttributeValue.from_number(1),
                ":start": _sfn_tasks.DynamoAttributeValue.from_number(0)
            },
            result_path=_sfn.JsonPath.DISCARD)

        definition = configure_datagen \
            .next(add_datagen_step) \
            .next(update_iterator)

        datagen_stepfunctions = _sfn.StateMachine(
            self,
            "StreamDataGenStepFunctions",
            definition=definition,
            timeout=core.Duration.minutes(30))

        datagen_stepfunctions.add_to_role_policy(
            _iam.PolicyStatement(actions=[
                'elasticmapreduce:AddJobFlowSteps',
                'elasticmapreduce:DescribeStep'
            ],
                                 resources=['*']))

        step_trigger = _events.Rule(self,
                                    'StreamStepTrigger',
                                    schedule=_events.Schedule.cron(
                                        minute='0/10',
                                        hour='*',
                                        month='*',
                                        week_day='*',
                                        year='*'))

        step_trigger.add_target(
            _events_targets.SfnStateMachine(
                machine=datagen_stepfunctions,
                input=_events.RuleTargetInput.from_object({
                    "Emr": {
                        "Cluster": {
                            "Id": core.Fn.ref(emr_cluster.logical_id)
                        }
                    }
                })))

        with open('common/common_cdk/lambda/stepfunctions_trigger.py',
                  'r') as f:
            lambda_source = f.read()

        stepfunctions_trigger_lambda = _lambda.SingletonFunction(
            self,
            'StreamStepFunctionsTriggerLambda',
            uuid="cf042246-01d0-11eb-adc1-0242ac120002",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.inline(lambda_source),
            handler='index.handler',
            function_name='stepfunctions-stream-datagen-trigger')

        stepfunctions_trigger_lambda.role.add_to_policy(
            _iam.PolicyStatement(actions=["states:StartExecution"],
                                 resources=['*']))

        trigger_step_lambda_provider = _custom_resources.Provider(
            self,
            'StreamStepFunctionsTriggerLambdaProvider',
            on_event_handler=stepfunctions_trigger_lambda)

        core.CustomResource(
            self,
            'StreamStepFunctionsTrigger',
            service_token=trigger_step_lambda_provider.service_token,
            properties={"stepArn": datagen_stepfunctions.state_machine_arn})

        with open('common/common_cdk/lambda/stream_generator.py', 'r') as f:
            lambda_source = f.read()

        sale_stream_generator_lambda = _lambda.Function(
            scope=self,
            id='WebSaleStreamGenerator',
            runtime=_lambda.Runtime.PYTHON_3_7,
            memory_size=2048,
            timeout=core.Duration.minutes(15),
            code=_lambda.Code.inline(lambda_source),
            handler='index.lambda_handler',
            environment={
                'REGION': core.Aws.REGION,
                'STREAM_NAME': web_sale_stream
            })

        stream_source_bucket.bucket.add_event_notification(
            _s3.EventType.OBJECT_CREATED,
            _s3_notifications.LambdaDestination(sale_stream_generator_lambda),
            _s3.NotificationKeyFilter(prefix='sale', suffix='csv'))

        sale_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(
                actions=[
                    "s3:DeleteObject",
                    "s3:GetObject",
                    "s3:ListBucket",
                ],
                resources=[
                    stream_source_bucket.bucket.bucket_arn + '/*',
                    stream_source_bucket.bucket.bucket_arn
                ]))

        sale_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(actions=["kinesis:PutRecords"],
                                 resources=[
                                     stack.format_arn(
                                         service='kinesis',
                                         resource='stream',
                                         resource_name=web_sale_stream)
                                 ]))

        sale_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(actions=['kms:GenerateDataKey'],
                                 resources=[
                                     stack.format_arn(
                                         service='kms',
                                         resource='key',
                                         sep='/',
                                         resource_name=kinesis_key.key_id)
                                 ]))

        customer_stream_generator_lambda = _lambda.Function(
            scope=self,
            id='WebCustomerStreamGenerator',
            runtime=_lambda.Runtime.PYTHON_3_7,
            memory_size=2048,
            timeout=core.Duration.minutes(15),
            code=_lambda.Code.inline(lambda_source),
            handler='index.lambda_handler',
            environment={
                'REGION': core.Aws.REGION,
                'STREAM_NAME': web_customer_stream
            })

        stream_source_bucket.bucket.add_event_notification(
            _s3.EventType.OBJECT_CREATED,
            _s3_notifications.LambdaDestination(
                customer_stream_generator_lambda),
            _s3.NotificationKeyFilter(prefix='customer', suffix='csv'))

        customer_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(
                actions=[
                    "s3:DeleteObject",
                    "s3:GetObject",
                    "s3:ListBucket",
                ],
                resources=[
                    stream_source_bucket.bucket.bucket_arn + '/*',
                    stream_source_bucket.bucket.bucket_arn
                ]))

        customer_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(actions=["kinesis:PutRecords"],
                                 resources=[
                                     stack.format_arn(
                                         service='kinesis',
                                         resource='stream',
                                         resource_name=web_customer_stream)
                                 ]))

        customer_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(actions=['kms:GenerateDataKey'],
                                 resources=[
                                     stack.format_arn(
                                         service='kms',
                                         resource='key',
                                         sep='/',
                                         resource_name=kinesis_key.key_id)
                                 ]))

        address_stream_generator_lambda = _lambda.Function(
            scope=self,
            id='WebCustomerAddressStreamGenerator',
            runtime=_lambda.Runtime.PYTHON_3_7,
            memory_size=2048,
            timeout=core.Duration.minutes(15),
            code=_lambda.Code.inline(lambda_source),
            handler='index.lambda_handler',
            environment={
                'REGION': core.Aws.REGION,
                'STREAM_NAME': web_customer_address_stream
            })

        stream_source_bucket.bucket.add_event_notification(
            _s3.EventType.OBJECT_CREATED,
            _s3_notifications.LambdaDestination(
                address_stream_generator_lambda),
            _s3.NotificationKeyFilter(prefix='address', suffix='csv'))

        address_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(
                actions=[
                    "s3:DeleteObject",
                    "s3:GetObject",
                    "s3:ListBucket",
                ],
                resources=[
                    stream_source_bucket.bucket.bucket_arn + '/*',
                    stream_source_bucket.bucket.bucket_arn
                ]))

        address_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(
                actions=["kinesis:PutRecords"],
                resources=[
                    stack.format_arn(service='kinesis',
                                     resource='stream',
                                     resource_name=web_customer_address_stream)
                ]))

        address_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(actions=['kms:GenerateDataKey'],
                                 resources=[
                                     stack.format_arn(
                                         service='kms',
                                         resource='key',
                                         sep='/',
                                         resource_name=kinesis_key.key_id)
                                 ]))
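
# Hedged sketch (not part of the original example): common/common_cdk/lambda/stream_generator.py
# is shared by the three generator functions above but is not shown. Given the S3 trigger
# and the kinesis:PutRecords / s3:DeleteObject permissions, a plausible handler reads each
# new CSV object, pushes its lines to the stream named in STREAM_NAME, and removes the
# object; the record format is an assumption.
import os

import boto3

s3 = boto3.client("s3")
kinesis = boto3.client("kinesis", region_name=os.environ["REGION"])


def lambda_handler(event, context):
    stream_name = os.environ["STREAM_NAME"]
    for record in event["Records"]:
        bucket = record["s3"]["bucket"]["name"]
        key = record["s3"]["object"]["key"]
        body = s3.get_object(Bucket=bucket, Key=key)["Body"].read().decode("utf-8")
        lines = [line for line in body.splitlines() if line]

        # PutRecords accepts at most 500 records per call
        for i in range(0, len(lines), 500):
            kinesis.put_records(
                StreamName=stream_name,
                Records=[{"Data": line.encode("utf-8"),
                          "PartitionKey": str(hash(line))}
                         for line in lines[i:i + 500]])

        # the generator is granted s3:DeleteObject, so drained files are removed
        s3.delete_object(Bucket=bucket, Key=key)
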
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here

        lambda_policies = [
            iam.PolicyStatement(actions=[
                "logs:CreateLogStream", "logs:PutLogEvents",
                "logs:CreateLogGroup"
            ],
                                effect=iam.Effect.ALLOW,
                                resources=[
                                    "arn:aws:logs:" + core.Aws.REGION + ":" +
                                    core.Aws.ACCOUNT_ID + ":*"
                                ]),
            iam.PolicyStatement(actions=["dynamodb:*"],
                                effect=iam.Effect.ALLOW,
                                resources=[
                                    "arn:aws:dynamodb:" + core.Aws.REGION +
                                    ":" + core.Aws.ACCOUNT_ID + ":*"
                                ])
        ]

        base_api = _apigw.RestApi(self,
                                  'PetclinicApiGatewayWithCors',
                                  rest_api_name='PetclinicApiGatewayWithCors')

        api_resource = base_api.root.add_resource('api')

        website_bucket = _s3.Bucket(self,
                                    'PetclinicWebsite',
                                    website_index_document='index.html',
                                    public_read_access=True,
                                    removal_policy=core.RemovalPolicy.DESTROY)

        deployment = _s3deploy.BucketDeployment(
            self,
            'PetclinicDeployWebsite',
            sources=[_s3deploy.Source.asset('./spring-petclinic-static')],
            destination_bucket=website_bucket,
            retain_on_delete=False
            #destination_key_prefix='web/static'
        )

        # Modify the config.js with a CloudFormation custom resource
        modify_policy = [
            iam.PolicyStatement(actions=[
                "s3:PutObject", "s3:PutObjectAcl", "s3:PutObjectVersionAcl",
                "s3:GetObject"
            ],
                                effect=iam.Effect.ALLOW,
                                resources=[website_bucket.bucket_arn + "/*"]),
            iam.PolicyStatement(actions=["s3:ListBucket"],
                                effect=iam.Effect.ALLOW,
                                resources=[website_bucket.bucket_arn]),
            iam.PolicyStatement(actions=["dynamodb:*"],
                                effect=iam.Effect.ALLOW,
                                resources=[
                                    "arn:aws:dynamodb:" + core.Aws.REGION +
                                    ":" + core.Aws.ACCOUNT_ID + ":*"
                                ])
        ]

        with open("custom-resource-code/init.py", encoding="utf-8") as fp:
            code_body = fp.read()

        dynamodb_tables = []

        for service in ['customer', 'vet', 'visit']:
            table = _dynamodb.Table(
                self,
                service.capitalize() + 'Table',
                partition_key={
                    'name': 'id',
                    'type': _dynamodb.AttributeType.STRING
                },
                removal_policy=core.RemovalPolicy.DESTROY,
                read_capacity=5,
                write_capacity=5,
            )

            dynamodb_tables.append(table.table_name)

            base_lambda = _lambda.Function(
                self,
                'ApiPetclinic' + service.capitalize() + 'Lambda',
                handler='org.springframework.samples.petclinic.' + service +
                's.StreamLambdaHandler::handleRequest',
                runtime=_lambda.Runtime.JAVA_8,
                code=_lambda.Code.asset(
                    './spring-petclinic-serverless/spring-petclinic-' +
                    service + 's-serverless/target/spring-petclinic-' +
                    service + 's-serverless-2.0.7.jar'),
                memory_size=1024,
                timeout=core.Duration.seconds(300),
                initial_policy=lambda_policies,
                environment={
                    "DYNAMODB_TABLE_NAME": table.table_name,
                    "SERVER_SERVLET_CONTEXT_PATH": "/api/" + service
                })
            base_version = base_lambda.add_version(
                name='v1',
                provisioned_executions=1)  # Added to keep the Java Lambda warm
            entity = api_resource.add_resource(service)
            entity.add_proxy(
                default_integration=_apigw.LambdaIntegration(base_version))
            self.add_cors_options(entity)

        resource = _cfn.CustomResource(
            self,
            "S3ModifyCustomResource",
            provider=_cfn.CustomResourceProvider.lambda_(
                _lambda.SingletonFunction(
                    self,
                    "CustomResourceSingleton",
                    uuid="f7d4f730-4ee1-11e8-9c2d-fa7ae01bbebc",
                    code=_lambda.InlineCode(code_body),
                    handler="index.handler",
                    timeout=core.Duration.seconds(300),
                    runtime=_lambda.Runtime.PYTHON_3_7,
                    initial_policy=modify_policy)),
            properties={
                "Bucket": website_bucket.bucket_name,
                "InvokeUrl": base_api.url,
                "DynamoDBTables": dynamodb_tables
            })

        core.CfnOutput(self,
                       "PetclinicWebsiteUrl",
                       export_name="PetclinicWebsiteUrl",
                       value=website_bucket.bucket_website_url)
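
# Hedged sketch (not part of the original example): custom-resource-code/init.py, which
# backs the S3ModifyCustomResource above, is not shown. A plausible handler rewrites the
# website's config.js so the static frontend points at the freshly created API Gateway
# URL (and could likewise seed the DynamoDB tables passed in DynamoDBTables); the file
# name and JS shape are assumptions, and cfnresponse is available because the code is
# supplied inline.
import boto3
import cfnresponse


def handler(event, context):
    try:
        if event["RequestType"] in ("Create", "Update"):
            props = event["ResourceProperties"]
            config_js = 'var api_url = "%s";\n' % props["InvokeUrl"]
            boto3.client("s3").put_object(
                Bucket=props["Bucket"],
                Key="config.js",
                Body=config_js.encode("utf-8"),
                ContentType="application/javascript")
        cfnresponse.send(event, context, cfnresponse.SUCCESS, {})
    except Exception as exc:
        cfnresponse.send(event, context, cfnresponse.FAILED, {"Error": str(exc)})
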
Example No. 26
    def __init__(self, scope: Construct, id: str, vpc: _ec2.IVpc,
                 codebucket: IBucket, s3_deploy, metrics) -> None:
        super().__init__(scope, id)

        self._metrics_mapping = CfnMapping(
            self,
            'AnonymousData',
            mapping={'SendAnonymousData': {
                'Data': 'Yes'
            }})
        self._metrics_condition = CfnCondition(
            self,
            'AnonymousDatatoAWS',
            expression=Fn.condition_equals(
                self._metrics_mapping.find_in_map('SendAnonymousData', 'Data'),
                'Yes'))

        self._helper_func = _lambda.SingletonFunction(
            self,
            'SolutionHelper',
            uuid='75248a81-9138-468c-9ba1-bca6c7137599',
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler='lambda_function.handler',
            description=
            'This function generates UUID for each deployment and sends anonymous data to the AWS Solutions team',
            code=_lambda.Code.from_bucket(bucket=codebucket,
                                          key='app_code/solution_helper.zip'),
            vpc=vpc,
            timeout=Duration.seconds(30))
        self._helper_func.add_dependency(s3_deploy)

        self._lambda_provider = _custom_resources.Provider(
            self,
            'LambdaProvider',
            on_event_handler=self._helper_func,
            vpc=vpc)

        self._uuid = CustomResource(
            self,
            'UUIDCustomResource',
            service_token=self._lambda_provider.service_token,
            properties={"Resource": "UUID"},
            resource_type="Custom::CreateUUID",
            removal_policy=RemovalPolicy.DESTROY)

        self._send_data = CustomResource(
            self,
            'SendDataCustomResource',
            service_token=self._lambda_provider.service_token,
            properties={
                "Resource": "AnonymousMetric",
                "UUID": self._uuid.get_att_string("UUID"),
                "Solution": metrics["Solution"],
                "Data": metrics
            },
            resource_type='Custom::AnonymousData',
            removal_policy=RemovalPolicy.DESTROY)
        self._send_data.node.add_dependency(self._uuid)

        Aspects.of(self._helper_func).add(Condition(self._metrics_condition))
        Aspects.of(self._uuid).add(Condition(self._metrics_condition))
        Aspects.of(self._send_data).add(Condition(self._metrics_condition))
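
# Hedged sketch (not part of the original example): app_code/solution_helper.zip is not
# included in this listing. The SolutionHelper function serves both custom resources
# through the Provider framework, so it returns plain dicts (lambda_function.py with a
# handler() entry point, per the handler string above). The metrics endpoint URL is an
# assumption.
import json
import urllib.request
import uuid
from datetime import datetime


def handler(event, context):
    if event["RequestType"] != "Create":
        return {}

    props = event.get("ResourceProperties", {})
    if props.get("Resource") == "UUID":
        # surfaced to the SendData resource via get_att_string("UUID")
        return {"Data": {"UUID": str(uuid.uuid4())}}

    if props.get("Resource") == "AnonymousMetric":
        payload = {"Solution": props.get("Solution"),
                   "UUID": props.get("UUID"),
                   "TimeStamp": datetime.utcnow().isoformat(),
                   "Data": props.get("Data")}
        req = urllib.request.Request(
            "https://metrics.awssolutionsbuilder.com/generic",  # assumed endpoint
            data=json.dumps(payload).encode("utf-8"),
            headers={"Content-Type": "application/json"})
        urllib.request.urlopen(req, timeout=10)
    return {}
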
Example No. 27
	def __init__(self, scope: core.Construct, id: str, elastic: Elastic, vpc: ec2.Vpc, roles: list, cluster: eks.Cluster, **kwargs) -> None:
		super().__init__(scope, id, **kwargs)

		sm_policy = iam.PolicyStatement(
			actions=["secretsmanager:GetSecretValue"], 
			effect=iam.Effect.ALLOW, 
			resources=[elastic.secret.secret_arn]
		)

		es_policy = iam.PolicyStatement(
			actions=["es:DescribeElasticsearchDomain"], 
			effect=iam.Effect.ALLOW, 
			resources=[elastic.domain.domain_arn]
		)

		function = lbd.SingletonFunction(
			self,
			"ElasticsearchConfigFunction",
			uuid="e579d5f9-1709-43ea-b75f-9d1452ca7690",
			code=lbd.Code.from_asset(
				"custom_resources/elasticsearch/"
			),
			handler="config.handler",
			runtime=lbd.Runtime.PYTHON_3_7,
			function_name="elasticsearchConfig",
			initial_policy=[sm_policy,es_policy],
			log_retention=logs.RetentionDays.ONE_DAY,
			security_group=ec2.SecurityGroup.from_security_group_id(self, "lambdaVPC", vpc.vpc_default_security_group),
			timeout=core.Duration.seconds(30),
			vpc=vpc,
			vpc_subnets=ec2.SubnetSelection(
				one_per_az=True
			)
		)

		provider = cr.Provider(
			self, "ElasticsearchConfigProvider",
			on_event_handler=function,
			log_retention=logs.RetentionDays.ONE_DAY
		)

		core.CustomResource(
			self, "ElasticSearchConfig", 
			service_token=provider.service_token,
			properties={
				"domain": elastic.domain.domain_name,
				"secret": elastic.secret.secret_arn,
				"roles": [role.role_arn for role in roles],
				"shards": self.node.try_get_context("elastic")['shards'],
				"user": boto3.client('sts').get_caller_identity().get('Arn'),
				"replicas": self.node.try_get_context("elastic")['replicas']
			}
		)

		manifests = []
		for namespace in self.node.try_get_context("kubernetes")['namespaces']:
			manifests.append({
				"apiVersion": "v1",
				"kind": "ConfigMap",
				"metadata": {
					"name": "elasticsearch",
					"namespace": namespace
				},
				"data": {
					"url": elastic.domain.domain_endpoint
				}
			})
		eks.KubernetesManifest(
			self, 
			"elastic-search-cm", 
			cluster=cluster,
			manifest=manifests
		)		
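
# Hedged sketch (not part of the original example): custom_resources/elasticsearch/config.py
# is not shown. Judging by the GetSecretValue / DescribeElasticsearchDomain permissions and
# the properties passed above (domain, secret, roles, shards, replicas, user), a plausible
# handler signs in with the master user kept in Secrets Manager and applies the requested
# shard/replica settings; role mappings are omitted, and the secret layout, endpoint key
# and index template path below are assumptions.
import base64
import json
import urllib.request

import boto3


def handler(event, context):
    if event["RequestType"] == "Delete":
        return {}

    props = event["ResourceProperties"]
    status = boto3.client("es").describe_elasticsearch_domain(
        DomainName=props["domain"])["DomainStatus"]
    endpoint = status.get("Endpoints", {}).get("vpc") or status.get("Endpoint")

    secret = json.loads(boto3.client("secretsmanager").get_secret_value(
        SecretId=props["secret"])["SecretString"])
    auth = base64.b64encode(
        ("%s:%s" % (secret["username"], secret["password"])).encode()).decode()

    template = {"index_patterns": ["*"],
                "settings": {"number_of_shards": int(props["shards"]),
                             "number_of_replicas": int(props["replicas"])}}
    req = urllib.request.Request(
        "https://%s/_template/default" % endpoint,
        data=json.dumps(template).encode("utf-8"),
        method="PUT",
        headers={"Content-Type": "application/json",
                 "Authorization": "Basic " + auth})
    urllib.request.urlopen(req, timeout=10)
    return {}
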
Example No. 28
	def __init__(self, scope: core.Construct, id: str, cluster: eks.Cluster, **kwargs) -> None:
		super().__init__(scope, id, **kwargs)

		maps = []
		self.roles = []

		ecr_policy = iam.PolicyStatement(
			actions=[
				"ecr:DescribeImages",
				"ecr:ListImages",
				"ecr:BatchDeleteImage"
			], 
			effect=iam.Effect.ALLOW, 
			resources=[
				"arn:aws:ecr:%s:%s:repository/%s" % (core.Stack.of(self).region, core.Stack.of(self).account, namespace) for namespace in self.node.try_get_context("kubernetes")['namespaces']
			]
		)

		function = lbd.SingletonFunction(
			self,
			"ECRDeleteImagesFunction",
			uuid="19411b0e-0e80-4ad4-a316-3235940775e4",
			code=lbd.Code.from_asset(
				"custom_resources/kubernetes/"
			),
			handler="config.handler",
			runtime=lbd.Runtime.PYTHON_3_7,
			function_name="kubernetesConfig",
			initial_policy=[ecr_policy],
			log_retention=logs.RetentionDays.ONE_DAY,
			timeout=core.Duration.seconds(30)
		)

		provider = cr.Provider(
			self, "ECRDeleteImagesFunctionProvider",
			on_event_handler=function,
			log_retention=logs.RetentionDays.ONE_DAY
		)


		repositores = []
		for namespace in self.node.try_get_context("kubernetes")['namespaces']: 
			manifest = cluster.add_manifest(
				"eksConfigNamespace-%s" % namespace,
				{
					"apiVersion": "v1",
					"kind": "Namespace",
					"metadata": {
						"name": namespace
					}
				}
			)

			sa = cluster.add_service_account(
				"service-account-%s" % namespace,
				name="statement-demo",
				namespace=namespace
			)
			sa.node.add_dependency(manifest)
			self.roles.append(sa.role)

			repository = ecr.Repository(
				self, ("repository-%s" % namespace),
				removal_policy=core.RemovalPolicy.DESTROY,
				repository_name=namespace,
				lifecycle_rules=[ecr.LifecycleRule(max_image_count=1)]
			)

			repositores.append(repository.repository_arn)

			maps.append({
				"apiVersion": "v1",
				"kind": "ConfigMap",
				"metadata": {
					"name": "application.properties",
					"namespace": namespace
				},
				"data": {
					"application-aws.properties":  Path("../%s/src/main/resources/application-aws.properties" % namespace).read_text()
				}
			})

			core.CustomResource(
				self, "ECRDeleteImagesFunction-%s" % namespace, 
				service_token=provider.service_token,
				properties={
					"repository": namespace
				}
			).node.add_dependency(repository)

		eks.KubernetesManifest(
			self, 
			"eksConfigMaps", 
			cluster=cluster, 
			manifest=maps
		)

		iam.Policy(
			self, "saPolicy", 
			force=True, 
			policy_name="EKSSAPolicy", 
			roles=self.roles, 
			statements=[
				iam.PolicyStatement(
					actions=["cloudwatch:PutMetricData"], 
					conditions={
						"StringEquals": {
							"cloudwatch:namespace": "statement12"
						},
					},
					resources=["*"]
				)
			]
		)
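
# Hedged sketch (not part of the original example): custom_resources/kubernetes/config.py
# is not shown. The ECRDeleteImagesFunction above is granted DescribeImages / ListImages /
# BatchDeleteImage, so a plausible handler empties the repository on stack deletion so
# that RemovalPolicy.DESTROY can actually remove it.
import boto3

ecr = boto3.client("ecr")


def handler(event, context):
    if event["RequestType"] != "Delete":
        return {}

    repository = event["ResourceProperties"]["repository"]
    paginator = ecr.get_paginator("list_images")
    for page in paginator.paginate(repositoryName=repository):
        image_ids = page.get("imageIds", [])
        if image_ids:
            # delete by digest/tag in the same batches the paginator returns
            ecr.batch_delete_image(repositoryName=repository, imageIds=image_ids)
    return {}
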
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here

        lambda_policies = [
            iam.PolicyStatement(actions=[
                "logs:CreateLogStream", "logs:PutLogEvents",
                "logs:CreateLogGroup"
            ],
                                effect=iam.Effect.ALLOW,
                                resources=[
                                    "arn:aws:logs:" + core.Aws.REGION + ":" +
                                    core.Aws.ACCOUNT_ID + ":*"
                                ]),
            iam.PolicyStatement(actions=["dynamodb:*"],
                                effect=iam.Effect.ALLOW,
                                resources=[
                                    "arn:aws:dynamodb:" + core.Aws.REGION +
                                    ":" + core.Aws.ACCOUNT_ID + ":*"
                                ])
        ]

        table = _dynamodb.Table(
            self,
            'VisitTable',
            partition_key={
                'name': 'id',
                'type': _dynamodb.AttributeType.STRING
            },
            removal_policy=core.RemovalPolicy.DESTROY,
            read_capacity=5,
            write_capacity=5,
        )

        # Modify the config.js with a CloudFormation custom resource

        modify_policy = [
            iam.PolicyStatement(actions=["dynamodb:*"],
                                effect=iam.Effect.ALLOW,
                                resources=[
                                    "arn:aws:dynamodb:" + core.Aws.REGION +
                                    ":" + core.Aws.ACCOUNT_ID + ":*"
                                ])
        ]

        resource = _cfn.CustomResource(
            self,
            "VisitDataImportCustomResource",
            provider=_cfn.CustomResourceProvider.lambda_(
                _lambda.SingletonFunction(
                    self,
                    "CustomResourceSingleton",
                    uuid="f7d4f730-4ee1-11e8-9c2d-fa7ae01bbebc",
                    code=self.custom_resource,
                    handler="index.handler",
                    timeout=core.Duration.seconds(300),
                    runtime=_lambda.Runtime.PYTHON_3_7,
                    initial_policy=modify_policy)),
            properties={"DynamoDBTable": table.table_name})

        base_lambda = _lambda.Function(
            self,
            'ApiPetclinicVisitLambda',
            handler=
            'org.springframework.samples.petclinic.visits.StreamLambdaHandler::handleRequest',
            runtime=_lambda.Runtime.JAVA_8,
            code=self.lambda_code,
            memory_size=1024,
            timeout=core.Duration.seconds(300),
            initial_policy=lambda_policies,
            environment={
                "DYNAMODB_TABLE_NAME": table.table_name,
                "SERVER_SERVLET_CONTEXT_PATH": "/api/visit"
            })

        version = base_lambda.add_version(str(round(time.time())))

        alias = _lambda.Alias(self,
                              'ApiPetclinicVisitLambdaAlias',
                              alias_name='Prod',
                              version=version,
                              provisioned_concurrent_executions=5)

        _deploy.LambdaDeploymentGroup(
            self,
            'ApiPetclinicVisitDeploymentGroup',
            alias=alias,
            deployment_config=_deploy.LambdaDeploymentConfig.
            LINEAR_10_PERCENT_EVERY_1_MINUTE)
    def __init__(self, scope: core.Construct, id: str,
                 log_bucket: _s3.Bucket,
                 config_table: _dynamodb.Table,
                 tshirt_size: str,
                 sink_bucket: _s3.Bucket,
                 vpc: _ec2.Vpc,
                 **kwargs) -> None:

        super().__init__(scope, id, **kwargs)

        service_role = _iam.Role(
            self, 'BatchEmrServiceRole',
            assumed_by=_iam.ServicePrincipal('elasticmapreduce.amazonaws.com')
        )

        service_role.add_managed_policy(_iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AmazonElasticMapReduceRole'))

        cluster_role = _iam.Role(
            self, 'BatchEmrClusterRole',
            assumed_by=_iam.ServicePrincipal("ec2.amazonaws.com")
        )

        _iam.Policy(
            self, 'BatchEmrClusterPolicy',
            statements=[
                _iam.PolicyStatement(
                    actions=[
                        "glue:CreateDatabase",
                        "glue:UpdateDatabase",
                        "glue:DeleteDatabase",
                        "glue:GetDatabase",
                        "glue:GetDatabases",
                        "glue:CreateTable",
                        "glue:UpdateTable",
                        "glue:DeleteTable",
                        "glue:GetTable",
                        "glue:GetTables",
                        "glue:GetTableVersions",
                        "glue:CreatePartition",
                        "glue:BatchCreatePartition",
                        "glue:UpdatePartition",
                        "glue:DeletePartition",
                        "glue:BatchDeletePartition",
                        "glue:GetPartition",
                        "glue:GetPartitions",
                        "glue:BatchGetPartition",
                        "glue:CreateUserDefinedFunction",
                        "glue:UpdateUserDefinedFunction",
                        "glue:DeleteUserDefinedFunction",
                        "glue:GetUserDefinedFunction",
                        "glue:GetUserDefinedFunctions",
                        "cloudwatch:PutMetricData",
                        "dynamodb:ListTables",
                        "s3:HeadBucket",
                        "ec2:Describe*",
                    ],
                    resources=['*']
                ),
                _iam.PolicyStatement(
                    actions=['s3:GetObject'],
                    resources=[
                        'arn:aws:s3:::' + ARA_BUCKET_NAME + BINARIES + DataGenConfig.DSDGEN_INSTALL_SCRIPT,
                        'arn:aws:s3:::' + ARA_BUCKET_NAME + BINARIES + DataGenConfig.JAR_FILE
                    ]
                ),
                _iam.PolicyStatement(
                    actions=['s3:PutObject'],
                    resources=[log_bucket.bucket_arn + "/data-generator/*"]
                ),
                _iam.PolicyStatement(
                    actions=[
                        "s3:AbortMultipartUpload",
                        "s3:CreateBucket",
                        "s3:DeleteObject",
                        "s3:GetBucketVersioning",
                        "s3:GetObject",
                        "s3:GetObjectTagging",
                        "s3:GetObjectVersion",
                        "s3:ListBucket",
                        "s3:ListBucketMultipartUploads",
                        "s3:ListBucketVersions",
                        "s3:ListMultipartUploadParts",
                        "s3:PutBucketVersioning",
                        "s3:PutObject",
                        "s3:PutObjectTagging"
                    ],
                    resources=[
                        sink_bucket.bucket_arn + '/*',
                        sink_bucket.bucket_arn

                    ]
                )
            ],
            roles=[cluster_role]
        )

        cluster_role.add_managed_policy(_iam.ManagedPolicy.from_aws_managed_policy_name('AmazonSSMManagedInstanceCore'))

        _iam.CfnInstanceProfile(
            self, 'BatchEmrClusterInstanceProfile',
            roles=[cluster_role.role_name],
            instance_profile_name=cluster_role.role_name
        )

        # Security Groups for the EMR cluster (private subnet)
        # https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-man-sec-groups.html#emr-sg-elasticmapreduce-master-private
        master_sg = _ec2.SecurityGroup(self, 'ElasticMapReduce-Master-Private', vpc=vpc)
        slave_sg = _ec2.SecurityGroup(self, 'ElasticMapReduce-Slave-Private', vpc=vpc)
        service_sg = _ec2.SecurityGroup(self, 'ElasticMapReduce-ServiceAccess', vpc=vpc, allow_all_outbound=False)

        # Service SG used by the proxy instance
        service_sg.add_ingress_rule(master_sg, _ec2.Port.tcp(9443))
        service_sg.add_egress_rule(master_sg, _ec2.Port.tcp(8443))
        service_sg.add_egress_rule(slave_sg, _ec2.Port.tcp(8443))

        # EMR Master
        master_sg.add_ingress_rule(master_sg, _ec2.Port.all_icmp())
        master_sg.add_ingress_rule(master_sg, _ec2.Port.all_tcp())
        master_sg.add_ingress_rule(master_sg, _ec2.Port.all_udp())
        master_sg.add_ingress_rule(slave_sg, _ec2.Port.all_icmp())
        master_sg.add_ingress_rule(slave_sg, _ec2.Port.all_tcp())
        master_sg.add_ingress_rule(slave_sg, _ec2.Port.all_udp())
        master_sg.add_ingress_rule(service_sg, _ec2.Port.tcp(8443))

        # EMR Slave
        slave_sg.add_ingress_rule(master_sg, _ec2.Port.all_icmp())
        slave_sg.add_ingress_rule(master_sg, _ec2.Port.all_tcp())
        slave_sg.add_ingress_rule(master_sg, _ec2.Port.all_udp())
        slave_sg.add_ingress_rule(slave_sg, _ec2.Port.all_icmp())
        slave_sg.add_ingress_rule(slave_sg, _ec2.Port.all_tcp())
        slave_sg.add_ingress_rule(slave_sg, _ec2.Port.all_udp())
        slave_sg.add_ingress_rule(service_sg, _ec2.Port.tcp(8443))

        with open('common/common_cdk/lambda/datagen_config.py', 'r') as f:
            lambda_source = f.read()

        configure_datagen_function = _lambda.SingletonFunction(
            self, 'BatchConfigureDatagenLambda',
            uuid="58a9a222-ff07-11ea-adc1-0242ac120002",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.inline(lambda_source),
            handler='index.handler',
            function_name='datagen-config',
            environment={
                'TABLE_NAME': config_table.table_name,
                'JAR_LOCATION': BINARIES_LOCATION + DataGenConfig.JAR_FILE,
            },
            timeout=core.Duration.seconds(10)
        )

        configure_datagen_function.role.add_to_policy(
            _iam.PolicyStatement(
                actions=[
                    'dynamodb:GetItem',
                    'dynamodb:PutItem',
                ],
                resources=[config_table.table_arn]
            )
        )

        terminate_cluster = _sfn_tasks.EmrTerminateCluster(
            self, 'BatchDeleteCluster',
            cluster_id=_sfn.TaskInput.from_data_at("$.Emr.Cluster.Id").value,
            integration_pattern=_sfn.IntegrationPattern.RUN_JOB,
        )

        terminate_cluster_error = _sfn_tasks.EmrTerminateCluster(
            self, 'BatchDeleteClusterError',
            cluster_id=_sfn.TaskInput.from_data_at("$.Emr.Cluster.Id").value,
            integration_pattern=_sfn.IntegrationPattern.RUN_JOB,
        ).next(_sfn.Fail(self, 'StepFailure'))

        create_cluster = _sfn_tasks.EmrCreateCluster(
            self, "BatchCreateEMRCluster",
            name="BatchDatagenCluster",
            result_path="$.Emr",
            release_label='emr-5.30.1',
            log_uri=log_bucket.s3_url_for_object() + "/data-generator",
            cluster_role=cluster_role,
            service_role=service_role,
            bootstrap_actions=[
                _sfn_tasks.EmrCreateCluster.BootstrapActionConfigProperty(
                    name="dsdgen-install",
                    script_bootstrap_action=_sfn_tasks.EmrCreateCluster.ScriptBootstrapActionConfigProperty(
                        path=BINARIES_LOCATION + DataGenConfig.DSDGEN_INSTALL_SCRIPT,
                    )
                )
            ],
            applications=[
                _sfn_tasks.EmrCreateCluster.ApplicationConfigProperty(
                    name="spark"
                ),
                _sfn_tasks.EmrCreateCluster.ApplicationConfigProperty(
                    name="hadoop"
                )
            ],
            instances=_sfn_tasks.EmrCreateCluster.InstancesConfigProperty(
                emr_managed_master_security_group=master_sg.security_group_id,
                emr_managed_slave_security_group=slave_sg.security_group_id,
                service_access_security_group=service_sg.security_group_id,
                ec2_subnet_ids=vpc.select_subnets().subnet_ids,
                instance_fleets=[
                    _sfn_tasks.EmrCreateCluster.InstanceFleetConfigProperty(
                        instance_fleet_type=_sfn_tasks.EmrCreateCluster.InstanceRoleType.MASTER,
                        instance_type_configs=[
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m5.xlarge',
                                weighted_capacity=1
                            ),
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m5a.xlarge',
                                weighted_capacity=1
                            ),
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m4.xlarge',
                                weighted_capacity=1
                            ),
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m5d.xlarge',
                                weighted_capacity=1
                            ),
                        ],
                        launch_specifications=_sfn_tasks.EmrCreateCluster.InstanceFleetProvisioningSpecificationsProperty(
                            spot_specification=_sfn_tasks.EmrCreateCluster.SpotProvisioningSpecificationProperty(
                                timeout_action=_sfn_tasks.EmrCreateCluster.SpotTimeoutAction.SWITCH_TO_ON_DEMAND,
                                timeout_duration_minutes=5
                            )
                        ),
                        target_on_demand_capacity=0,
                        target_spot_capacity=1
                    ),
                    _sfn_tasks.EmrCreateCluster.InstanceFleetConfigProperty(
                        instance_fleet_type=_sfn_tasks.EmrCreateCluster.InstanceRoleType.CORE,
                        instance_type_configs=[
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m5.xlarge',
                                weighted_capacity=1
                            ),
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m5.2xlarge',
                                weighted_capacity=2
                            ),
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m5a.xlarge',
                                weighted_capacity=1
                            ),
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m5a.2xlarge',
                                weighted_capacity=2
                            ),
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m4.xlarge',
                                weighted_capacity=1
                            )
                        ],
                        launch_specifications=_sfn_tasks.EmrCreateCluster.InstanceFleetProvisioningSpecificationsProperty(
                            spot_specification=_sfn_tasks.EmrCreateCluster.SpotProvisioningSpecificationProperty(
                                timeout_action=_sfn_tasks.EmrCreateCluster.SpotTimeoutAction.SWITCH_TO_ON_DEMAND,
                                timeout_duration_minutes=5
                            )
                        ),
                        target_on_demand_capacity=0,
                        target_spot_capacity=DataGenConfig.BATCH_CLUSTER_SIZE[tshirt_size]

                    )
                ]
            )
        ).add_catch(handler=terminate_cluster_error, result_path="$.error")

        configure_datagen = _sfn_tasks.LambdaInvoke(
            self, "BatchConfigureDatagenTask",
            lambda_function=configure_datagen_function,
            payload=_sfn.TaskInput.from_text('{'
                                             '"Param": "batch_iterator",'
                                             '"Module": "batch",'
                                             '"SinkBucket": "'+sink_bucket.s3_url_for_object()+'",'
                                             '"Parallelism": "'+str(int(DataGenConfig.BATCH_DATA_SIZE[tshirt_size])*2)+'",'
                                             '"DataSize": "'+DataGenConfig.BATCH_DATA_SIZE[tshirt_size]+'",'
                                             '"TmpBucket": "fake-bucket"'
                                             '}'),
            result_path='$.Config'
        ).add_catch(handler=terminate_cluster_error, result_path="$.error")

        add_datagen_step = _sfn.CustomState(
            self, 'BatchAddDataGenStep',
            state_json={
                "Type": "Task",
                "Resource": "arn:aws:states:::elasticmapreduce:addStep.sync",
                "Parameters": {
                    "ClusterId.$": "$.Emr.Cluster.Id",
                    "Step": {
                        "Name": "DatagenStep",
                        "ActionOnFailure": "CONTINUE",
                        "HadoopJarStep": {
                            "Jar": "command-runner.jar",
                            "Args.$": "$.Config.Payload.StepParam"
                        }
                    }
                },
                "ResultPath": "$.Step",
                "Next": "BatchUpdateIterator",
                "Catch": [
                    {
                        "ErrorEquals": ["States.ALL"],
                        "Next": "BatchDeleteClusterError",
                        "ResultPath": "$.error"
                    }
                ]
            }
        )

        update_iterator = _sfn_tasks.DynamoUpdateItem(
            self, 'BatchUpdateIterator',
            table=config_table,
            key={
                'param': _sfn_tasks.DynamoAttributeValue.from_string('batch_iterator')
            },
            update_expression='SET iterator = if_not_exists(iterator, :start) + :inc',
            expression_attribute_values={
                ":inc": _sfn_tasks.DynamoAttributeValue.from_number(1),
                ":start": _sfn_tasks.DynamoAttributeValue.from_number(0)
            },
            result_path=_sfn.JsonPath.DISCARD
        )

        definition = configure_datagen \
            .next(create_cluster) \
            .next(add_datagen_step) \
            .next(update_iterator) \
            .next(terminate_cluster)

        datagen_stepfunctions = _sfn.StateMachine(
            self, "BatchDataGenStepFunctions",
            definition=definition,
            timeout=core.Duration.minutes(30)
        )

        datagen_stepfunctions.add_to_role_policy(
            _iam.PolicyStatement(
                actions=[
                    'elasticmapreduce:AddJobFlowSteps',
                    'elasticmapreduce:DescribeStep'
                ],
                resources=['*']
            )
        )
        datagen_stepfunctions.add_to_role_policy(
            _iam.PolicyStatement(
                actions=[
                    "iam:CreateServiceLinkedRole",
                    "iam:PutRolePolicy"
                ],
                resources=["arn:aws:iam::*:role/aws-service-role/elasticmapreduce.amazonaws.com*/AWSServiceRoleForEMRCleanup*"],
                conditions={
                    "StringLike": {
                        "iam:AWSServiceName": [
                            "elasticmapreduce.amazonaws.com",
                            "elasticmapreduce.amazonaws.com.cn"
                        ]
                    }
                }
            )
        )

        step_trigger = _events.Rule(
            self, 'BatchSteptrigger',
            schedule=_events.Schedule.cron(minute='0/30',
                                           hour='*',
                                           month='*',
                                           week_day='*',
                                           year='*')
        )

        step_trigger.add_target(_events_targets.SfnStateMachine(machine=datagen_stepfunctions))

        with open('common/common_cdk/lambda/stepfunctions_trigger.py', 'r') as f:
            lambda_source = f.read()

        stepfunctions_trigger_lambda = _lambda.SingletonFunction(
            self, 'BatchStepFunctionsTriggerLambda',
            uuid="9597f6f2-f840-11ea-adc1-0242ac120002",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.inline(lambda_source),
            handler='index.handler',
            function_name='stepfunctions-batch-datagen-trigger'
        )

        stepfunctions_trigger_lambda.role.add_to_policy(
            _iam.PolicyStatement(
                actions=["states:StartExecution"],
                resources=['*']
            )
        )

        trigger_step_lambda_provider = _custom_resources.Provider(
            self, 'StepFunctionsTriggerLambdaProvider',
            on_event_handler=stepfunctions_trigger_lambda
        )

        core.CustomResource(
            self, 'StepFunctionsTrigger',
            service_token=trigger_step_lambda_provider.service_token,
            properties={
                "stepArn": datagen_stepfunctions.state_machine_arn
            }
        )

        # terminate clusters
        with open('common/common_cdk/lambda/stepfunctions_terminate_emr.py', 'r') as f:
            lambda_source = f.read()

        sfn_terminate = _lambda.SingletonFunction(
            self, 'StepFuncTerminateBatch',
            uuid='58a9a422-ff07-11ea-adc1-0242ac120002',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.inline(lambda_source),
            handler='index.handler',
            timeout=core.Duration.minutes(5)
        )

        sfn_terminate.role.add_to_policy(
            _iam.PolicyStatement(
                actions=[
                    'elasticmapreduce:ListClusters',
                    'elasticmapreduce:TerminateJobFlows',
                    'states:ListStateMachines',
                    'states:ListExecutions',
                    'states:StopExecution'
                ],
                resources=['*']
            )
        )

        sfn_terminate_provider = _custom_resources.Provider(
            self, 'StepFuncTerminateBatchLambdaProvider',
            on_event_handler=sfn_terminate
        )

        core.CustomResource(
            self, 'StepFuncTerminateBatchCustomResource',
            service_token=sfn_terminate_provider.service_token,
            properties={
                "state_machine": 'BatchDatagen'
            })
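
# Hedged sketch (not part of the original example): common/common_cdk/lambda/stepfunctions_terminate_emr.py
# is not shown. Judging by the IAM statement above, a plausible handler reacts to stack
# deletion by stopping running executions of the named state machine and terminating any
# BatchDatagenCluster EMR clusters that are still alive, so deletion is not blocked; the
# name matching is an assumption.
import boto3

sfn = boto3.client("stepfunctions")
emr = boto3.client("emr")


def handler(event, context):
    if event["RequestType"] != "Delete":
        return {}

    name_fragment = event["ResourceProperties"]["state_machine"]

    # stop running executions of the matching state machine(s)
    for machine in sfn.list_state_machines()["stateMachines"]:
        if name_fragment in machine["name"]:
            running = sfn.list_executions(
                stateMachineArn=machine["stateMachineArn"],
                statusFilter="RUNNING")["executions"]
            for execution in running:
                sfn.stop_execution(executionArn=execution["executionArn"])

    # terminate any datagen clusters that are still running
    clusters = emr.list_clusters(
        ClusterStates=["STARTING", "BOOTSTRAPPING", "RUNNING", "WAITING"])["Clusters"]
    ids = [c["Id"] for c in clusters if c["Name"] == "BatchDatagenCluster"]
    if ids:
        emr.terminate_job_flows(JobFlowIds=ids)
    return {}
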