Example No. 1
def add_static_site(stack: core.Stack):
    stack.static_site_bucket = Bucket(stack,
                                      'StaticSiteBucket',
                                      website_index_document="index.html",
                                      website_error_document="error.html",
                                      public_read_access=True,
                                      removal_policy=RemovalPolicy.RETAIN)

    stack.static_bucket_deploy = s3_deployment.BucketDeployment(
        stack,
        "StaticSiteDeploy",
        sources=[s3_deployment.Source.asset("./www/static-site-content")],
        destination_bucket=stack.static_site_bucket)
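A minimal wiring sketch for the helper above (assuming aws_cdk.core is imported as core, as the type hints suggest; the app and stack names are illustrative):

# Hypothetical entry point exercising add_static_site.
app = core.App()
stack = core.Stack(app, "StaticSiteStack")
add_static_site(stack)
app.synth()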
Example No. 2
    def create_function(self, other_stack: Stack, id, *, code: AssetCode,
                        handler: str, runtime: Runtime) -> IVersion:
        func = Function(
            self,
            id,
            code=code,
            handler=handler,
            runtime=runtime,
            role=self._role,
        )

        # If code/runtime changes, CDK doesn't re-evaluate the version. As a
        # result, we would keep an old version around, and things wouldn't
        # work. But we also don't want to generate a new version on every
        # run. The compromise: use the sha256 hash of the index file.
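        # (Newer CDK releases expose Function.current_version, which creates a
        # fresh Version automatically whenever the code or configuration
        # changes.)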
        with open(f"{code.path}/index.js", "rb") as f:
            sha256 = hashlib.sha256(f.read()).hexdigest()
        version = func.add_version(f"Version-{sha256}")

        # Create a Parameter Store entry that records the ARN of this Lambda
        parameter_name = parameter_store.get_parameter_name(
            f"/LambdaEdge/{id}")
        StringParameter(
            self,
            parameter_name,
            string_value=Fn.join(
                ":",
                [
                    func.function_arn,
                    version.version,
                ],
            ),
            parameter_name=parameter_name,
        )

        other_stack.add_dependency(self)

        # Create a custom resource that fetches the Lambda's ARN from the Parameter Store
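        # (Lambda@Edge functions must be created in us-east-1, so a consuming
        # stack in another region reads the published version ARN back out of
        # the Parameter Store through this cross-region custom resource.)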
        cross_region_func = LambdaEdgeFunction(
            other_stack,
            f"LambdaEdgeFunction-{sha256}",
            parameter_name=parameter_name,
            policy=AwsCustomResourcePolicy.from_sdk_calls(
                resources=AwsCustomResourcePolicy.ANY_RESOURCE),
        )
        # Reconstruct a Version object in the other stack from this ARN
        return Version.from_version_arn(other_stack, id,
                                        cross_region_func.get_arn())
Example No. 3
 def _add_custom_cookbook_policies_to_role(self, role_ref: str, name: str):
     bucket_info = parse_bucket_url(
         self._config.dev_settings.cookbook.chef_cookbook)
     bucket_name = bucket_info.get("bucket_name")
     object_key = bucket_info.get("object_key")
     iam.CfnPolicy(
         Stack.of(self),
         name,
         policy_name="CustomCookbookS3Url",
         policy_document=iam.PolicyDocument(statements=[
             iam.PolicyStatement(
                 actions=["s3:GetObject"],
                 effect=iam.Effect.ALLOW,
                 resources=[
                     self._format_arn(region="",
                                      service="s3",
                                      account="",
                                      resource=bucket_name,
                                      resource_name=object_key)
                 ],
             ),
             iam.PolicyStatement(
                 actions=["s3:GetBucketLocation"],
                 effect=iam.Effect.ALLOW,
                 resources=[
                     self._format_arn(service="s3",
                                      resource=bucket_name,
                                      region="",
                                      account="")
                 ],
             ),
         ]),
         roles=[role_ref],
     )
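For reference, a plausible sketch of the parse_bucket_url helper assumed above (hypothetical; the real implementation may differ):

from urllib.parse import urlparse


def parse_bucket_url(url: str) -> dict:
    # "s3://my-bucket/cookbooks/custom.tgz" ->
    # {"bucket_name": "my-bucket", "object_key": "cookbooks/custom.tgz"}
    parsed = urlparse(url)
    return {"bucket_name": parsed.netloc, "object_key": parsed.path.lstrip("/")}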
Example No. 4
 def _add_node_role(self, node: Union[HeadNode, BaseQueue], name: str):
     additional_iam_policies = set(node.iam.additional_iam_policy_arns)
     if self._config.monitoring.logs.cloud_watch.enabled:
         additional_iam_policies.add(
             policy_name_to_arn("CloudWatchAgentServerPolicy"))
     if self._config.scheduling.scheduler == "awsbatch":
         additional_iam_policies.add(
             policy_name_to_arn("AWSBatchFullAccess"))
     return iam.CfnRole(
         Stack.of(self),
         name,
         path=self._cluster_scoped_iam_path(),
         managed_policy_arns=list(additional_iam_policies),
         assume_role_policy_document=get_assume_role_policy_document(
             "ec2.{0}".format(Stack.of(self).url_suffix)),
     )
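A minimal sketch of the policy_name_to_arn helper assumed above (hypothetical; AWS managed policies live in the reserved "aws" account namespace, so the ARN carries no region or account ID):

def policy_name_to_arn(policy_name: str) -> str:
    # e.g. "CloudWatchAgentServerPolicy" ->
    #      "arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy"
    return f"arn:aws:iam::aws:policy/{policy_name}"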
Example No. 5
 def _build_policy(self) -> List[iam.PolicyStatement]:
     return [
         iam.PolicyStatement(
             sid="Ec2",
             actions=[
                 "ec2:DescribeInstanceAttribute",
             ],
             effect=iam.Effect.ALLOW,
             resources=["*"],
         ),
         iam.PolicyStatement(
             sid="S3GetObj",
             actions=["s3:GetObject"],
             effect=iam.Effect.ALLOW,
             resources=[
                 self._format_arn(
                     service="s3",
                     resource="{0}-aws-parallelcluster/*".format(
                         Stack.of(self).region),
                     region="",
                     account="",
                 )
             ],
         ),
     ]
Example No. 6
def test_FUNC_constructor_WITH_various_parameters_EXPECT_layer_created():
    """
    Test whether the layer can be created.

    :return: No return.
    """
    layer = Layer(Stack(App(), 'Test'), 'Test')
    assert layer is not None
Example No. 7
 def _add_pcluster_policies_to_role(self, role_ref: str, name: str):
     iam.CfnPolicy(
         Stack.of(self),
         name,
         policy_name="parallelcluster",
         policy_document=iam.PolicyDocument(
             statements=self._build_policy()),
         roles=[role_ref],
     )
Example No. 8
    def __init__(
        self,
        scope: core.Construct,
        id_: str,
        role_alias: str,
        role_arn: str,
        credential_duration_seconds: int,
        log_retention=None,
        timeout=None,
    ) -> None:
        super().__init__(scope, id_)

        on_create = self.get_on_create(
            role_alias=role_alias,
            role_arn=role_arn,
            credential_duration_seconds=credential_duration_seconds)
        on_update = self.get_on_update(
            role_alias=role_alias,
            role_arn=role_arn,
            credential_duration_seconds=credential_duration_seconds)
        on_delete = self.get_on_delete(role_alias=role_alias)

        account_id = Stack.of(self).account
        region = Stack.of(self).region

        policy = AwsCustomResourcePolicy.from_sdk_calls(resources=[
            f'arn:aws:iot:{region}:{account_id}:rolealias/{role_alias}'
        ])
        lambda_role_singleton = CustomResourcesLambdaRole(scope)
        lambda_role_singleton.add_to_policy(actions=["iam:PassRole"],
                                            resources=[role_arn])

        # lambda_role = self.get_provisioning_lambda_role(role_arn)

        AwsCustomResource(scope=self,
                          id='CustomResource',
                          policy=policy,
                          log_retention=log_retention,
                          on_create=on_create,
                          on_update=on_update,
                          on_delete=on_delete,
                          resource_type='Custom::AWS-IoT-Role-Alias',
                          role=lambda_role_singleton.role,
                          timeout=timeout)
Example No. 9
def test_ALL_FUNC_WITH_default_params_EXPECT_singleton_initialized():
    """
    Test whether the layer singleton works.

    :return: No return.
    """
    with pytest.raises(Exception):
        LayerSingleton(Stack(App(), 'Test'), 'Test')

    with pytest.raises(Exception):
        LayerSingleton.get_instance()

    assert LayerSingleton.is_initialized() is False

    LayerSingleton.initialize(Stack(App(), 'Test'), 'Test')

    with pytest.raises(Exception):
        LayerSingleton.initialize(Stack(App(), 'Test'), 'Test')

    LayerSingleton.safe_initialize(Stack(App(), 'Test'), 'Test')

    assert isinstance(LayerSingleton.get_instance().layer, Layer)
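One shape of LayerSingleton that satisfies these assertions (a hypothetical sketch, assuming the Layer, Stack, and App imports used by the test; the real class may differ):

class LayerSingleton:
    _instance = None

    def __init__(self, scope, name) -> None:
        # Direct construction is forbidden; callers must use initialize().
        raise Exception('Use LayerSingleton.initialize() instead.')

    @classmethod
    def initialize(cls, scope, name) -> None:
        if cls._instance is not None:
            raise Exception('LayerSingleton is already initialized.')
        # Bypass __init__ so construction succeeds exactly once.
        cls._instance = object.__new__(cls)
        cls._instance.layer = Layer(scope, name)

    @classmethod
    def safe_initialize(cls, scope, name) -> None:
        # Idempotent variant: a no-op when already initialized.
        if cls._instance is None:
            cls.initialize(scope, name)

    @classmethod
    def is_initialized(cls) -> bool:
        return cls._instance is not None

    @classmethod
    def get_instance(cls) -> 'LayerSingleton':
        if cls._instance is None:
            raise Exception('LayerSingleton is not initialized.')
        return cls._instance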
Example No. 10
    def __init__(self, scope: Construct, stack_id: str, *,
                 props: ComputeStackProps, **kwargs):
        super().__init__(scope, stack_id, **kwargs)

        region = Stack.of(self).region

        version = VersionQuery(self, 'Version', version=props.deadline_version)

        # Take a Linux image and install Deadline on it to create a new image
        linux_image = DeadlineMachineImage(
            self,
            "LinuxImage",
            props=ImageBuilderProps(
                # We use the linux full version string here as there is no Windows equivalent available on the
                # VersionQuery right now, since it is only exposing Linux installers.
                deadline_version=version.linux_full_version_string(),
                parent_ami=MachineImage.latest_amazon_linux(),
                image_version=props.image_recipe_version))
        # Set up a worker fleet that uses the image we just created
        worker_fleet_linux = WorkerInstanceFleet(
            self,
            "WorkerFleetLinux",
            vpc=props.vpc,
            render_queue=props.render_queue,
            worker_machine_image=MachineImage.generic_linux(
                {region: linux_image.ami_id}))
        worker_fleet_linux.fleet.node.default_child.node.add_dependency(
            linux_image.node.default_child)

        # Take a Windows image and install Deadline on it to create a new image
        windows_image = DeadlineMachineImage(
            self,
            "WindowsImage",
            props=ImageBuilderProps(
                deadline_version=version.linux_full_version_string(),
                parent_ami=MachineImage.latest_windows(
                    WindowsVersion.WINDOWS_SERVER_2019_ENGLISH_CORE_BASE),
                image_version=props.image_recipe_version))
        # Set up a worker fleet that uses the image we just created
        worker_fleet_windows = WorkerInstanceFleet(
            self,
            "WorkerFleetWindows",
            vpc=props.vpc,
            render_queue=props.render_queue,
            worker_machine_image=MachineImage.generic_windows(
                {region: windows_image.ami_id}))
        worker_fleet_windows.fleet.node.default_child.node.add_dependency(
            windows_image.node.default_child)
Example No. 11
def test_FUNC_hash_WITH_valid_parameters_EXPECT_hash_created():
    """
    Test that hashing is consistent and works as expected.

    :return: No return.
    """
    stack = Stack(App(), 'TestStack')

    integration = LambdaIntegration(
        scope=stack,
        integration_name='TestIntegration',
        api=CfnApi(stack, 'TestApi'),
        lambda_function=Function(
            stack,
            'TestLambdaFunction',
            code=Code.from_inline('def handler(*args, **kwargs): return 123'),
            handler='index.handler',
            runtime=Runtime.PYTHON_3_6))

    assert integration.hash == 'ab93cecc508e529c3791ba48a1275deec88cdd6b43a7e1d443906df48fa300e4'
Example No. 12
    def _add_s3_access_policies_to_role(self, node: Union[HeadNode, BaseQueue],
                                        role_ref: str, name: str):
        """Attach S3 policies to given role."""
        read_only_s3_resources = []
        read_write_s3_resources = []
        for s3_access in node.iam.s3_access:
            for resource in s3_access.resource_regex:
                arn = self._format_arn(service="s3",
                                       resource=resource,
                                       region="",
                                       account="")
                if s3_access.enable_write_access:
                    read_write_s3_resources.append(arn)
                else:
                    read_only_s3_resources.append(arn)

        s3_access_policy = iam.CfnPolicy(
            Stack.of(self),
            name,
            policy_document=iam.PolicyDocument(statements=[]),
            roles=[role_ref],
            policy_name="S3Access",
        )

        if read_only_s3_resources:
            s3_access_policy.policy_document.add_statements(
                iam.PolicyStatement(
                    sid="S3Read",
                    effect=iam.Effect.ALLOW,
                    actions=["s3:Get*", "s3:List*"],
                    resources=read_only_s3_resources,
                ))

        if read_write_s3_resources:
            s3_access_policy.policy_document.add_statements(
                iam.PolicyStatement(sid="S3ReadWrite",
                                    effect=iam.Effect.ALLOW,
                                    actions=["s3:*"],
                                    resources=read_write_s3_resources))
Example No. 13
def add_react_build(stack: core.Stack, code_pipeline, source_output,
                    bucket_arn: str):
    # Could refactor the bucket to be part of the stage

    # https://github.com/aws-samples/aws-cdk-examples/blob/master/typescript/static-site/static-site.ts
    # Need to move to a stack / into startuptoolbag
    # The codebuild project could be moved back out into the pipeline (a bit awkward?)

    react_site_bucket = Bucket.from_bucket_arn(stack,
                                               id='SiteBucket',
                                               bucket_arn=bucket_arn)
    stack.build_output_artifact = codepipeline.Artifact()
    build_output_artifact = codepipeline.Artifact()
    codebuild_project = codebuild.PipelineProject(
        stack,
        "t-u-b-CDKCodebuild",
        project_name="t-u-b-CodebuildProject",
        build_spec=codebuild.BuildSpec.from_source_filename(
            filename='buildspec.yml'),
        environment=codebuild.BuildEnvironment(privileged=True),
        description='Pipeline for the-ultimate-boilerplate',
        timeout=core.Duration.minutes(60),
    )

    build_action = codepipeline_actions.CodeBuildAction(
        action_name="ReactBuild",
        project=codebuild_project,
        input=source_output,
        outputs=[build_output_artifact])

    s3_deploy = codepipeline_actions.S3DeployAction(
        action_name="ReactS3Push",
        input=build_output_artifact,
        bucket=react_site_bucket)

    # Would be more elegant to be one stage but the input to deploy must be created in a prior stage
    code_pipeline.add_stage(stage_name="ReactBuild", actions=[build_action])
    code_pipeline.add_stage(stage_name="ReactDeploy", actions=[s3_deploy])
Example No. 14
 def _stack_unique_id(self):
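     # stack_id is an ARN of the form
     # arn:aws:cloudformation:<region>:<account>:stack/<name>/<uuid>,
     # so element 2 of the "/" split is the stack's unique <uuid> suffix.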
     return Fn.select(2, Fn.split("/", Stack.of(self).stack_id))
Example No. 15
#!/usr/bin/env python3

from aws_cdk.core import App, Stack, CfnParameter, NestedStack
from high_available_webapp.stack.vpc_stack import NetworkStack
from high_available_webapp.stack.application_stack import ApplicationStack

app = App()

main_stack = Stack(app, 'High-Available-WebApps', env={'region': 'us-west-2'})

vpc_stack = NetworkStack(main_stack, 'VpcStack')

ha_app_stack = ApplicationStack(main_stack, 'HA-APP', vpc=vpc_stack.vpc)

# load_balancer = LoadBalancerLib(main_stack, 'ALB', vpc=vpc_stack.vpc, auto_scaling=auto_scaling.ASG)

# webapp_stack = NestedStack(main_stack, 'WebApps')
# webapp_app_1 = AppLib(webapp_stack, 'WebAppStack1', vpc=vpc_stack.vpc, security_group=vpc_stack.security_group)
# webapp_app_2 = AppLib(webapp_stack, 'WebAppStack2', vpc=vpc_stack.vpc, security_group=vpc_stack.security_group)

app.synth()
Example No. 16
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 redshift_cluster_name: str,
                 user_secret: Secret) -> None:
        super().__init__(scope, id)

        stack = Stack.of(self)

        subprocess.call(
            ['pip', 'install', '-t', 'dwh/dwh_loader_layer/python/lib/python3.8/site-packages', '-r',
             'dwh/dwh_loader/requirements.txt', '--platform', 'manylinux1_x86_64', '--only-binary=:all:',
             '--upgrade'])

        requirements_layer = _lambda.LayerVersion(scope=self,
                                                  id='PythonRequirementsTemplate',
                                                  code=_lambda.Code.from_asset('dwh/dwh_loader_layer'),
                                                  compatible_runtimes=[_lambda.Runtime.PYTHON_3_8])

        dwh_loader_role = _iam.Role(
            self, 'Role',
            assumed_by=_iam.ServicePrincipal('lambda.amazonaws.com')
        )

        dwh_loader_role.add_managed_policy(_iam.ManagedPolicy.from_aws_managed_policy_name(
            'service-role/AWSLambdaBasicExecutionRole'
        ))

        dwh_loader_role.attach_inline_policy(
            _iam.Policy(
                self, 'InlinePolicy',
                statements=[
                    _iam.PolicyStatement(
                        actions=[
                            "redshift-data:ExecuteStatement",
                            "redshift-data:CancelStatement",
                            "redshift-data:ListStatements",
                            "redshift-data:GetStatementResult",
                            "redshift-data:DescribeStatement",
                            "redshift-data:ListDatabases",
                            "redshift-data:ListSchemas",
                            "redshift-data:ListTables",
                            "redshift-data:DescribeTable"
                        ],
                        resources=['*']
                    ),
                    _iam.PolicyStatement(
                        actions=["secretsmanager:GetSecretValue"],
                        resources=[user_secret.secret_arn]
                    ),
                    _iam.PolicyStatement(
                        actions=["redshift:GetClusterCredentials"],
                        resources=[
                            "arn:aws:redshift:*:*:dbname:*/*",
                            "arn:aws:redshift:*:*:dbuser:*/"+_config.Redshift.ETL_USER
                        ]
                    ),
                    _iam.PolicyStatement(
                        effect=_iam.Effect.DENY,
                        actions=["redshift:CreateClusterUser"],
                        resources=["arn:aws:redshift:*:*:dbuser:*/"+_config.Redshift.ETL_USER]
                    ),
                    _iam.PolicyStatement(
                        conditions={
                            'StringLike': {
                                "iam:AWSServiceName": "redshift-data.amazonaws.com"
                            }
                        },
                        actions=["iam:CreateServiceLinkedRole"],
                        resources=["arn:aws:iam::*:role/aws-service-role/redshift-data.amazonaws.com/AWSServiceRoleForRedshift"]
                    ),
                ]
            )
        )

        dwh_loader_function = _lambda.Function(
            self, 'Lambda',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset('dwh/dwh_loader'),
            handler='dwh_loader.handler',
            function_name='dwh-loader',
            environment={
                'CLUSTER_NAME': redshift_cluster_name,
                'PROCEDURE': _config.Redshift.ETL_PROCEDURE,
                'SECRET_ARN': user_secret.secret_arn,
                'DATABASE': _config.Redshift.DATABASE,
                'REGION': core.Aws.REGION,
                'SCHEMA': _config.Redshift.SCHEMA
            },
            layers=[requirements_layer],
            timeout=core.Duration.seconds(30),
            role=dwh_loader_role
        )

        dwh_loader_submit = _sfn_tasks.LambdaInvoke(
            self, 'Submit',
            lambda_function=dwh_loader_function,
            payload_response_only=True
        )

        dwh_loader_wait = _sfn.Wait(
            self, 'Wait',
            time=_sfn.WaitTime.duration(core.Duration.seconds(30))
        )

        dwh_loader_complete = _sfn.Choice(
            self, 'Complete'
        )

        dwh_loader_failed = _sfn.Fail(
            self, 'Fail',
            cause="Redshift Data API statement failed",
            error="$.Result.Error"
        )

        dwh_loader_status = _sfn_tasks.LambdaInvoke(
            self, 'Status',
            lambda_function=dwh_loader_function,
            result_path='$.Result',
            payload_response_only=True
        )

        definition = dwh_loader_submit \
            .next(dwh_loader_wait) \
            .next(dwh_loader_status) \
            .next(dwh_loader_complete
                  .when(_sfn.Condition.string_equals('$.Result.Status', 'FAILED'), dwh_loader_failed)
                  .when(_sfn.Condition.string_equals('$.Result.Status', 'FINISHED'), _sfn.Succeed(self, 'DwhLoaderSuccess'))
                  .otherwise(dwh_loader_wait))

        dwh_loader_stepfunctions = _sfn.StateMachine(
            self, 'StepFunctions',
            definition=definition,
            timeout=core.Duration.minutes(30)
        )

        step_trigger = _events.Rule(
            self, 'StepTrigger',
            schedule=_events.Schedule.cron(minute='0/30',
                                           hour='*',
                                           month='*',
                                           week_day='*',
                                           year='*')
        )

        step_trigger.add_target(
            _events_targets.SfnStateMachine(
                machine=dwh_loader_stepfunctions,
            )
        )
Example No. 17
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        policy_name: str,
        policy_document: Any,
        timeout: Duration = None
    ) -> None:
        super().__init__(scope, id)

        if isinstance(policy_document, dict):
            policy_document = json.dumps(policy_document)

        account_id = Stack.of(self).account
        region = Stack.of(self).region

        # IMPORTANT! Setting resources to the exact policy name is the most
        # restrictive we can be, but this will cause issues when trying to
        # update the policy name.
        # See this issue for more info: https://github.com/aws/aws-cdk/issues/14037
        # A possible workaround is setting resources to
        # 'arn:aws:iot:{region}:{account_id}:policy/*', which is more permissive.
        lambda_role = iam.Role(
            scope=self,
            id=f'{id}LambdaRole',
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
            inline_policies={
                "IotPolicyProvisioningPolicy":
                    iam.PolicyDocument(statements=[
                        iam.PolicyStatement(
                            actions=[
                                "iot:ListPolicyVersions", "iot:CreatePolicy", "iot:CreatePolicyVersion", "iot:DeletePolicy",
                                "iot:DeletePolicyVersion", "iot:GetPolicy"
                            ],
                            resources=[f'arn:aws:iot:{region}:{account_id}:policy/{policy_name}'],
                            effect=iam.Effect.ALLOW,
                        )
                    ])
            },
            managed_policies=[iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole")],
        )

        if not timeout:
            timeout = Duration.minutes(5)

        with open(path.join(path.dirname(__file__), 'iot_policy_event_handler.py')) as file:
            code = file.read()

        event_handler = aws_lambda.Function(
            scope=self,
            id=f'{id}EventHandler',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            code=aws_lambda.Code.from_inline(code),
            handler='index.on_event',
            role=lambda_role,
            timeout=timeout,
        )

        with open(path.join(path.dirname(__file__), 'iot_policy_is_complete_handler.py')) as file:
            is_complete_code = file.read()

        is_complete_handler = aws_lambda.Function(
            scope=self,
            id=f'{id}IsCompleteHandler',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            code=aws_lambda.Code.from_inline(is_complete_code),
            handler='index.is_complete',
            role=lambda_role,
        )

        provider = Provider(
            scope=self, 
            id=f'{id}Provider', 
            on_event_handler=event_handler,
            is_complete_handler=is_complete_handler, 
            query_interval=Duration.minutes(2),
        )

        core.CustomResource(
            scope=self,
            id=f'{id}IotPolicy',
            service_token=provider.service_token,
            removal_policy=RemovalPolicy.DESTROY,
            resource_type="Custom::IotPolicyAsync",
            properties={
                "policy_name": policy_name,
                "policy_document": policy_document,
            },
        )
Example No. 18
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here

        # Add a tag to all constructs in the stack. (Tagging a freshly
        # created, detached App/Stack would have no effect, so tag self.)
        Tag.add(self, "Created By", "umut.yalcinkaya")

        # VPC
        vpc = ec2.Vpc(self,
                      "VPC",
                      nat_gateways=0,
                      subnet_configuration=[
                          ec2.SubnetConfiguration(
                              name="public", subnet_type=ec2.SubnetType.PUBLIC)
                      ])
        # Iterate the private subnets
        # selection = vpc.select_subnets(
        #     subnet_type=ec2.SubnetType.PRIVATE
        # )

        # for subnet in selection.subnets:
        #     pass

        # AMI
        amzn_linux = ec2.MachineImage.latest_amazon_linux(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=ec2.AmazonLinuxEdition.STANDARD,
            virtualization=ec2.AmazonLinuxVirt.HVM,
            storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE)

        # Instance Role and SSM Managed Policy
        role = iam.Role(self,
                        "InstanceSSM",
                        assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"))

        role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AmazonEC2RoleforSSM"))

        # Instance
        instance = ec2.Instance(self,
                                "Instance",
                                instance_type=ec2.InstanceType("t3a.large"),
                                machine_image=amzn_linux,
                                vpc=vpc,
                                role=role,
                                key_name="umut-poc")

        instance.instance.add_property_override(
            "BlockDeviceMappings", [{
                "DeviceName": "/dev/sdb",
                "Ebs": {
                    "VolumeSize": "30",
                    "VolumeType": "gp2",
                    "DeleteOnTermination": "true"
                }
            }])

        # Script in S3 as Asset
        asset = Asset(self,
                      "Asset",
                      path=os.path.join(os.path.dirname(__file__),
                                        "configure.sh"))
        local_path = instance.user_data.add_s3_download_command(
            bucket=asset.bucket, bucket_key=asset.s3_object_key)

        # Userdata executes script from S3
        instance.user_data.add_execute_file_command(file_path=local_path)
        asset.grant_read(instance.role)
        # output information after deploy
        output = core.CfnOutput(self,
                                "BastionHost_information",
                                value=instance.instance_public_ip,
                                description="BastionHost's Public IP")
Example No. 19
#!/usr/bin/env python3
from aws_cdk.core import App, Stack, CfnParameter
from stack.base_stack import BaseStack
from stack.core_stack import CoreStack

app = App()

main_stack = Stack(app, 'Main')

email_param = CfnParameter(
    main_stack, 'email',
    description='email for sns subscription').value_as_string
app_stack = CoreStack(main_stack, 'AppStack', email=email_param)
base_stack = BaseStack(main_stack, 'BaseStack', app_stack.functions.my_lambda,
                       app_stack.functions.custom_config_rds,
                       app_stack.step_fn.state_machine)

#CdkworkshopStack(app, "projetox", env={'region': 'sa-east-1', 'account': os.environ['CDK_DEFAULT_ACCOUNT']})

app.synth()
Example No. 20
 def _stack_region(self):
     return Stack.of(self).region
Example No. 21
    def __init__(self, scope: core.Construct, id: str, vpc, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        stack = Stack.of(self)

        self.__key = KeyPair(self,
                             "bastion-keypair",
                             name=_config.Redshift.BASTION_HOST_KEY_PAIR_NAME,
                             description="Key Pair to connect to bastion host",
                             resource_prefix="ara-redshift-bastion")

        self._bastion_sg = ec2.SecurityGroup(self,
                                             id="bastion-sg",
                                             vpc=vpc,
                                             security_group_name="bastion-sg")

        # A proper IP address needs to be configured before stack deployment
        if _config.Redshift.LOCAL_IP is not None:
            self._bastion_sg.add_ingress_rule(
                ec2.Peer.ipv4(_config.Redshift.LOCAL_IP), ec2.Port.tcp(22))

        # Instance Role and SSM Managed Policy
        self._role = iam.Role(
            self,
            "InstanceSSM",
            assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"),
            role_name='bastion-host-role')

        self._role.add_to_policy(
            iam.PolicyStatement(
                actions=[
                    'secretsmanager:GetResourcePolicy',
                    'secretsmanager:GetSecretValue',
                    'secretsmanager:DescribeSecret',
                    'secretsmanager:ListSecretVersionIds'
                ],
                resources=[
                    stack.format_arn(
                        service='secretsmanager',
                        resource='secret:ec2-private-key/' +
                        _config.Redshift.BASTION_HOST_KEY_PAIR_NAME + '*')
                ]))

        self._role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'AmazonSSMManagedInstanceCore'))

        bastion_asg = _autoscaling.AutoScalingGroup(
            self,
            'bastion-autoscaling',
            instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE3,
                                              ec2.InstanceSize.NANO),
            machine_image=ec2.AmazonLinuxImage(),
            vpc=vpc,
            key_name=_config.Redshift.BASTION_HOST_KEY_PAIR_NAME,
            role=self._role,
            security_group=self._bastion_sg,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PRIVATE),
            cooldown=core.Duration.minutes(1),
            min_capacity=1,
            max_capacity=3,
            spot_price="0.005")

        self.__bastion_nlb = _elbv2.NetworkLoadBalancer(
            self,
            'bastion_elb',
            vpc=vpc,
            internet_facing=True,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PUBLIC))

        listener = self.__bastion_nlb.add_listener("Listener", port=22)
        listener.add_targets("Target", port=22, targets=[bastion_asg])
Example No. 22
import os

from infra.cicd.context import BuildContext
from aws_cdk.core import App, Stack, Environment
from infra.cicd.layers.images import BuildImagesLayer
from infra.cicd.layers.buckets import BucketLayer
from infra.cicd.layers.kms import KeyLayer
from infra.cicd.layers.pipeline import CodePipelineLayer
from infra.cicd.layers.buildjobs import BuildJobLayer
src_root_dir = os.path.join(os.path.dirname(__file__), "..")

default_env = Environment(region="us-west-2")


def create_cicd_stack(infra_stack):
    context = BuildContext(env=default_env)
    context.encryption_keys = KeyLayer(infra_stack, 'EncryptionKeys')
    context.build_images = BuildImagesLayer(infra_stack, 'BuildImages')
    context.buckets = BucketLayer(infra_stack, 'Buckets', context=context)
    context.build_projects = BuildJobLayer(infra_stack,
                                           'BuildJobs',
                                           context=context)
    context.pipelines = CodePipelineLayer(infra_stack,
                                          'CodePipelines',
                                          context=context)


app = App()
infra_stack = Stack(app, 'FinSurfBuilder', env=default_env)
create_cicd_stack(infra_stack)

app.synth()
Example No. 23
 def _stack_id(self):
     return Stack.of(self).stack_id
Example No. 24
 def _url_suffix(self):
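     # Maps to the AWS::URLSuffix pseudo parameter: "amazonaws.com" in
     # standard partitions, "amazonaws.com.cn" in the China partition.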
     return Stack.of(self).url_suffix
Example No. 25
 def _format_arn(self, **kwargs):
     return Stack.of(self).format_arn(**kwargs)
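Passing empty region and account segments, as the snippets above do for S3, yields a global-style ARN. An illustrative call (bucket name hypothetical):

# self._format_arn(service="s3", resource="my-bucket", region="", account="")
# -> "arn:aws:s3:::my-bucket"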
Example No. 26
    def _build_policy(self) -> List[iam.PolicyStatement]:
        policy = [
            iam.PolicyStatement(
                sid="Ec2",
                actions=[
                    "ec2:DescribeInstanceAttribute",
                    "ec2:DescribeInstances",
                    "ec2:DescribeInstanceStatus",
                    "ec2:CreateTags",
                    "ec2:DescribeVolumes",
                    "ec2:AttachVolume",
                ],
                effect=iam.Effect.ALLOW,
                resources=["*"],
            ),
            iam.PolicyStatement(
                sid="S3GetObj",
                actions=["s3:GetObject"],
                effect=iam.Effect.ALLOW,
                resources=[
                    self._format_arn(
                        service="s3",
                        resource="{0}-aws-parallelcluster/*".format(
                            Stack.of(self).region),
                        region="",
                        account="",
                    )
                ],
            ),
            iam.PolicyStatement(
                sid="ResourcesS3Bucket",
                effect=iam.Effect.ALLOW,
                actions=["s3:*"],
                resources=[
                    self._format_arn(service="s3",
                                     resource=self._cluster_bucket.name,
                                     region="",
                                     account=""),
                    self._format_arn(
                        service="s3",
                        resource=
                        f"{self._cluster_bucket.name}/{self._cluster_bucket.artifact_directory}/*",
                        region="",
                        account="",
                    ),
                ],
            ),
            iam.PolicyStatement(
                sid="CloudFormation",
                actions=[
                    "cloudformation:DescribeStacks",
                    "cloudformation:DescribeStackResource",
                    "cloudformation:SignalResource",
                ],
                effect=iam.Effect.ALLOW,
                resources=[
                    self._format_arn(
                        service="cloudformation",
                        resource=f"stack/{Stack.of(self).stack_name}/*"),
                    self._format_arn(
                        service="cloudformation",
                        resource=f"stack/{Stack.of(self).stack_name}-*/*"),
                ],
            ),
            iam.PolicyStatement(
                sid="DcvLicense",
                actions=[
                    "s3:GetObject",
                ],
                effect=iam.Effect.ALLOW,
                resources=[
                    self._format_arn(
                        service="s3",
                        resource="dcv-license.{0}/*".format(
                            Stack.of(self).region),
                        region="",
                        account="",
                    )
                ],
            ),
        ]

        if self._config.scheduling.scheduler != "awsbatch":
            policy.extend([
                iam.PolicyStatement(
                    sid="EC2Terminate",
                    actions=["ec2:TerminateInstances"],
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                    conditions={
                        "StringEquals": {
                            f"ec2:ResourceTag/{PCLUSTER_CLUSTER_NAME_TAG}":
                            Stack.of(self).stack_name
                        }
                    },
                ),
                iam.PolicyStatement(
                    sid="EC2RunInstances",
                    actions=["ec2:RunInstances"],
                    effect=iam.Effect.ALLOW,
                    resources=[
                        self._format_arn(service="ec2",
                                         resource=f"subnet/{subnet_id}")
                        for subnet_id in self._config.compute_subnet_ids
                    ] + [
                        self._format_arn(service="ec2",
                                         resource="network-interface/*"),
                        self._format_arn(service="ec2", resource="instance/*"),
                        self._format_arn(service="ec2", resource="volume/*"),
                        self._format_arn(
                            service="ec2",
                            resource=
                            f"key-pair/{self._config.head_node.ssh.key_name}"),
                        self._format_arn(service="ec2",
                                         resource="security-group/*"),
                        self._format_arn(service="ec2",
                                         resource="launch-template/*"),
                        self._format_arn(service="ec2",
                                         resource="placement-group/*"),
                    ] + [
                        self._format_arn(service="ec2",
                                         resource=f"image/{queue_ami}",
                                         account="")
                        for _, queue_ami in self._config.image_dict.items()
                    ],
                ),
                iam.PolicyStatement(
                    sid="PassRole",
                    actions=["iam:PassRole"],
                    effect=iam.Effect.ALLOW,
                    resources=self._generate_head_node_pass_role_resources(),
                ),
            ])

        if self._config.scheduling.scheduler == "plugin":
            cluster_shared_artifacts = get_attr(
                self._config,
                "scheduling.settings.scheduler_definition.plugin_resources.cluster_shared_artifacts"
            )
            if cluster_shared_artifacts:
                for artifacts in cluster_shared_artifacts:
                    if get_url_scheme(artifacts.source) == "s3":
                        bucket_info = parse_bucket_url(artifacts.source)
                        bucket_name = bucket_info.get("bucket_name")
                        object_key = bucket_info.get("object_key")
                        policy.extend([
                            iam.PolicyStatement(
                                actions=["s3:GetObject"],
                                effect=iam.Effect.ALLOW,
                                resources=[
                                    self._format_arn(
                                        region="",
                                        service="s3",
                                        account="",
                                        resource=bucket_name,
                                        resource_name=object_key,
                                    )
                                ],
                            ),
                        ])
        if self._config.directory_service:
            policy.append(
                iam.PolicyStatement(
                    actions=["secretsmanager:GetSecretValue"],
                    effect=iam.Effect.ALLOW,
                    resources=[
                        self._config.directory_service.password_secret_arn
                    ],
                ))

        return policy
Example No. 27
 def _stack_account(self):
     return Stack.of(self).account
Example No. 28
#!/usr/bin/env python3
import os.path
from aws_cdk.core import App, Stack, Environment
from infra.exports import create_layers
src_root_dir = os.path.join(os.path.dirname(__file__))

default_env = Environment(region="us-east-2")

app = App()
infra_stack = Stack(app, 'DroidAnlyz', env=default_env)
create_layers(infra_stack)
app.synth()
Example No. 29
    def __init__(self, scope: core.Construct, id: str, application_prefix: str,
                 suffix: str, kda_role: Role, **kwargs):
        super().__init__(scope, id, **kwargs)

        stack = Stack.of(self)
        region = stack.region

        # Create Cognito User Pool
        self.__user_pool = CfnUserPool(
            scope=self,
            id='UserPool',
            admin_create_user_config={'allowAdminCreateUserOnly': True},
            policies={'passwordPolicy': {
                'minimumLength': 8
            }},
            username_attributes=['email'],
            auto_verified_attributes=['email'],
            user_pool_name=application_prefix + '_user_pool')

        # Create a Cognito User Pool Domain using the newly created Cognito User Pool
        CfnUserPoolDomain(scope=self,
                          id='CognitoDomain',
                          domain=application_prefix + '-' + suffix,
                          user_pool_id=self.user_pool.ref)

        # Create Cognito Identity Pool
        self.__id_pool = CfnIdentityPool(
            scope=self,
            id='IdentityPool',
            allow_unauthenticated_identities=False,
            cognito_identity_providers=[],
            identity_pool_name=application_prefix + '_identity_pool')

        trust_relationship = FederatedPrincipal(
            federated='cognito-identity.amazonaws.com',
            conditions={
                'StringEquals': {
                    'cognito-identity.amazonaws.com:aud': self.id_pool.ref
                },
                'ForAnyValue:StringLike': {
                    'cognito-identity.amazonaws.com:amr': 'authenticated'
                }
            },
            assume_role_action='sts:AssumeRoleWithWebIdentity')
        # IAM role for master user
        master_auth_role = Role(scope=self,
                                id='MasterAuthRole',
                                assumed_by=trust_relationship)
        # Role for authenticated user
        limited_auth_role = Role(scope=self,
                                 id='LimitedAuthRole',
                                 assumed_by=trust_relationship)
        # Attach Role to Identity Pool
        CfnIdentityPoolRoleAttachment(
            scope=self,
            id='userPoolRoleAttachment',
            identity_pool_id=self.id_pool.ref,
            roles={'authenticated': limited_auth_role.role_arn})
        # Create master-user-group
        CfnUserPoolGroup(scope=self,
                         id='AdminsGroup',
                         user_pool_id=self.user_pool.ref,
                         group_name='master-user-group',
                         role_arn=master_auth_role.role_arn)
        # Create limited-user-group
        CfnUserPoolGroup(scope=self,
                         id='UsersGroup',
                         user_pool_id=self.user_pool.ref,
                         group_name='limited-user-group',
                         role_arn=limited_auth_role.role_arn)
        # Role for the Elasticsearch service to access Cognito
        es_role = Role(scope=self,
                       id='EsRole',
                       assumed_by=ServicePrincipal(service='es.amazonaws.com'),
                       managed_policies=[
                           ManagedPolicy.from_aws_managed_policy_name(
                               'AmazonESCognitoAccess')
                       ])

        # Use the following command line to generate the python dependencies layer content
        # pip3 install -t lambda-layer/python/lib/python3.8/site-packages -r lambda/requirements.txt
        # Build the lambda layer assets
        subprocess.call([
            'pip', 'install', '-t',
            'streaming/streaming_cdk/lambda-layer/python/lib/python3.8/site-packages',
            '-r', 'streaming/streaming_cdk/bootstrap-lambda/requirements.txt',
            '--upgrade'
        ])

        requirements_layer = _lambda.LayerVersion(
            scope=self,
            id='PythonRequirementsTemplate',
            code=_lambda.Code.from_asset(
                'streaming/streaming_cdk/lambda-layer'),
            compatible_runtimes=[_lambda.Runtime.PYTHON_3_8])

        # This lambda function will bootstrap the Elasticsearch cluster
        bootstrap_function_name = 'AESBootstrap'
        register_template_lambda = _lambda.Function(
            scope=self,
            id='RegisterTemplate',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset(
                'streaming/streaming_cdk/bootstrap-lambda'),
            handler='es-bootstrap.lambda_handler',
            environment={
                'REGION': region,
                'KDA_ROLE_ARN': kda_role.role_arn,
                'MASTER_ROLE_ARN': master_auth_role.role_arn
            },
            layers=[requirements_layer],
            timeout=Duration.minutes(15),
            function_name=bootstrap_function_name)

        lambda_role = register_template_lambda.role
        lambda_role.add_to_policy(
            PolicyStatement(
                actions=['logs:CreateLogGroup'],
                resources=[stack.format_arn(service='logs', resource='*')]))
        lambda_role.add_to_policy(
            PolicyStatement(
                actions=['logs:CreateLogStream', 'logs:PutLogEvents'],
                resources=[
                    stack.format_arn(service='logs',
                                     resource='log_group',
                                     resource_name='/aws/lambda/' +
                                     bootstrap_function_name + ':*')
                ]))

        # Let the lambda assume the master role so that actions can be executed on the cluster
        # https://aws.amazon.com/premiumsupport/knowledge-center/lambda-function-assume-iam-role/
        lambda_role.add_to_policy(
            PolicyStatement(actions=['sts:AssumeRole'],
                            resources=[master_auth_role.role_arn]))

        master_auth_role.assume_role_policy.add_statements(
            PolicyStatement(actions=['sts:AssumeRole'],
                            principals=[lambda_role]))

        # List all the roles that are allowed to access the Elasticsearch cluster.
        roles = [
            ArnPrincipal(limited_auth_role.role_arn),
            ArnPrincipal(master_auth_role.role_arn),
            ArnPrincipal(kda_role.role_arn)
        ]  # The users
        if register_template_lambda and register_template_lambda.role:
            roles.append(ArnPrincipal(
                lambda_role.role_arn))  # The lambda used to bootstrap
        # Create kms key
        kms_key = Key(scope=self,
                      id='kms-es',
                      alias='custom/es',
                      description='KMS key for Elasticsearch domain',
                      enable_key_rotation=True)

        # AES Log Groups
        es_app_log_group = logs.LogGroup(scope=self,
                                         id='EsAppLogGroup',
                                         retention=logs.RetentionDays.ONE_WEEK,
                                         removal_policy=RemovalPolicy.RETAIN)

        # Create the Elasticsearch domain
        es_domain_arn = stack.format_arn(service='es',
                                         resource='domain',
                                         resource_name=application_prefix +
                                         '/*')

        es_access_policy = PolicyDocument(statements=[
            PolicyStatement(principals=roles,
                            actions=[
                                'es:ESHttpGet', 'es:ESHttpPut',
                                'es:ESHttpPost', 'es:ESHttpDelete'
                            ],
                            resources=[es_domain_arn])
        ])
        self.__es_domain = es.CfnDomain(
            scope=self,
            id='searchDomain',
            elasticsearch_cluster_config={
                'instanceType': 'r5.large.elasticsearch',
                'instanceCount': 2,
                'dedicatedMasterEnabled': True,
                'dedicatedMasterCount': 3,
                'dedicatedMasterType': 'r5.large.elasticsearch',
                'zoneAwarenessEnabled': True,
                'zoneAwarenessConfig': {
                    'availabilityZoneCount': 2
                },
            },
            encryption_at_rest_options={
                'enabled': True,
                'kmsKeyId': kms_key.key_id
            },
            node_to_node_encryption_options={'enabled': True},
            ebs_options={
                'volumeSize': 10,
                'ebsEnabled': True
            },
            elasticsearch_version='7.9',
            domain_name=application_prefix,
            access_policies=es_access_policy,
            cognito_options={
                'enabled': True,
                'identityPoolId': self.id_pool.ref,
                'roleArn': es_role.role_arn,
                'userPoolId': self.user_pool.ref
            },
            advanced_security_options={
                'enabled': True,
                'internalUserDatabaseEnabled': False,
                'masterUserOptions': {
                    'masterUserArn': master_auth_role.role_arn
                }
            },
            domain_endpoint_options={
                'enforceHttps': True,
                'tlsSecurityPolicy': 'Policy-Min-TLS-1-2-2019-07'
            },
            # log_publishing_options={
            #     # 'ES_APPLICATION_LOGS': {
            #     #     'enabled': True,
            #     #     'cloud_watch_logs_log_group_arn': es_app_log_group.log_group_arn
            #     # },
            #     # 'AUDIT_LOGS': {
            #     #     'enabled': True,
            #     #     'cloud_watch_logs_log_group_arn': ''
            #     # },
            #     # 'SEARCH_SLOW_LOGS': {
            #     #     'enabled': True,
            #     #     'cloud_watch_logs_log_group_arn': ''
            #     # },
            #     # 'INDEX_SLOW_LOGS': {
            #     #     'enabled': True,
            #     #     'cloud_watch_logs_log_group_arn': ''
            #     # }
            # }
        )

        # Not yet on the roadmap...
        # See https://github.com/aws-cloudformation/aws-cloudformation-coverage-roadmap/issues/283
        # self.es_domain.add_property_override('ElasticsearchClusterConfig.WarmEnabled', True)
        # self.es_domain.add_property_override('ElasticsearchClusterConfig.WarmCount', 2)
        # self.es_domain.add_property_override('ElasticsearchClusterConfig.WarmType', 'ultrawarm1.large.elasticsearch')

        # Deny all roles from the authentication provider - users must be added to groups
        # This lambda function fixes up the Cognito identity pool role mapping
        cognito_function_name = 'CognitoFix'
        cognito_template_lambda = _lambda.Function(
            scope=self,
            id='CognitoFixLambda',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset(
                'streaming/streaming_cdk/cognito-lambda'),
            handler='handler.handler',
            environment={
                'REGION': region,
                'USER_POOL_ID': self.__user_pool.ref,
                'IDENTITY_POOL_ID': self.__id_pool.ref,
                'LIMITED_ROLE_ARN': limited_auth_role.role_arn
            },
            timeout=Duration.minutes(15),
            function_name=cognito_function_name)

        lambda_role = cognito_template_lambda.role
        lambda_role.add_to_policy(
            PolicyStatement(
                actions=['logs:CreateLogGroup'],
                resources=[stack.format_arn(service='logs', resource='*')]))
        lambda_role.add_to_policy(
            PolicyStatement(
                actions=['logs:CreateLogStream', 'logs:PutLogEvents'],
                resources=[
                    stack.format_arn(service='logs',
                                     resource='log_group',
                                     resource_name='/aws/lambda/' +
                                     cognito_function_name + ':*')
                ]))
        lambda_role.add_to_policy(
            PolicyStatement(actions=['cognito-idp:ListUserPoolClients'],
                            resources=[self.user_pool.attr_arn]))
        lambda_role.add_to_policy(
            PolicyStatement(actions=['iam:PassRole'],
                            resources=[limited_auth_role.role_arn]))

        cognito_id_res = Fn.join(':', [
            'arn:aws:cognito-identity', stack.region, stack.account,
            Fn.join('/', ['identitypool', self.__id_pool.ref])
        ])

        lambda_role.add_to_policy(
            PolicyStatement(actions=['cognito-identity:SetIdentityPoolRoles'],
                            resources=[cognito_id_res]))

        # Get the Domain Endpoint and register it with the lambda as environment variable.
        register_template_lambda.add_environment(
            'DOMAIN', self.__es_domain.attr_domain_endpoint)

        CfnOutput(scope=self,
                  id='createUserUrl',
                  description="Create a new user in the user pool here.",
                  value="https://" + scope.region +
                  ".console.aws.amazon.com/cognito/users?region=" +
                  scope.region + "#/pool/" + self.user_pool.ref + "/users")
        CfnOutput(scope=self,
                  id='kibanaUrl',
                  description="Access Kibana via this URL.",
                  value="https://" + self.__es_domain.attr_domain_endpoint +
                  "/_plugin/kibana/")

        bootstrap_lambda_provider = Provider(
            scope=self,
            id='BootstrapLambdaProvider',
            on_event_handler=register_template_lambda)
        CustomResource(scope=self,
                       id='ExecuteRegisterTemplate',
                       service_token=bootstrap_lambda_provider.service_token,
                       properties={'Timeout': 900})

        cognito_lambda_provider = Provider(
            scope=self,
            id='CognitoFixLambdaProvider',
            on_event_handler=cognito_template_lambda)
        cognito_fix_resource = CustomResource(
            scope=self,
            id='ExecuteCognitoFix',
            service_token=cognito_lambda_provider.service_token)
        cognito_fix_resource.node.add_dependency(self.__es_domain)
Example No. 30
    def __init__(self, scope: core.Construct, id: str, es_domain: CfnDomain, kda_role: iam.Role,
                 source_bucket: s3.Bucket, dest_bucket: s3.Bucket, **kwargs):
        super().__init__(scope, id, **kwargs)

        stack = Stack.of(self)

        kda_role.add_to_policy(PolicyStatement(actions=['cloudwatch:PutMetricData'],
                                               resources=['*']))

        artifacts_bucket_arn = 'arn:aws:s3:::' + _config.ARA_BUCKET.replace("s3://", "")
        kda_role.add_to_policy(PolicyStatement(actions=['s3:GetObject', 's3:GetObjectVersion'],
                                               resources=[artifacts_bucket_arn, artifacts_bucket_arn + '/binaries/*']))
        log_group = logs.LogGroup(scope=self,
                                  id='KdaLogGroup',
                                  retention=logs.RetentionDays.ONE_WEEK,
                                  removal_policy=RemovalPolicy.DESTROY)

        log_stream = logs.LogStream(scope=self,
                                    id='KdaLogStream',
                                    log_group=log_group,
                                    removal_policy=RemovalPolicy.DESTROY)

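        # CloudWatch log-stream ARNs are colon-separated
        # (...:log-group:<group>:log-stream:<stream>), hence sep=':'.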
        log_stream_arn = stack.format_arn(service='logs',
                                          resource='log-group',
                                          resource_name=log_group.log_group_name + ':log-stream:' +
                                                        log_stream.log_stream_name,
                                          sep=':')

        # TODO: restrict
        kda_role.add_to_policy(PolicyStatement(actions=['logs:*'],
                                               resources=[stack.format_arn(service='logs', resource='*')]))

        kda_role.add_to_policy(PolicyStatement(actions=['logs:DescribeLogStreams', 'logs:DescribeLogGroups'],
                                               resources=[log_group.log_group_arn,
                                                          stack.format_arn(service='logs', resource='log-group',
                                                                           resource_name='*')]))

        kda_role.add_to_policy(PolicyStatement(actions=['logs:PutLogEvents'],
                                               resources=[log_stream_arn]))

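        # Allow the Flink job to issue HTTP calls against the Elasticsearch domain.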
        kda_role.add_to_policy(PolicyStatement(actions=['es:ESHttp*'],
                                               resources=[stack.format_arn(service='es', resource='domain',
                                                                           resource_name=es_domain.domain_name + '/*')]))

        # TODO: restrict
        kda_role.add_to_policy(PolicyStatement(actions=['s3:*'],
                                               resources=['arn:aws:s3:::*']))

        # Define delivery stream
        # delivery_stream_name = 'clean_delivery_stream'
        #
        # s3_configuration = {
        #     'bucketArn': '',
        #     'compressionFormat': 'Snappy',
        #     'dataFormatConversionConfiguration': {
        #         'enabled': True,
        #         'inputFormatConfiguration': {'deserializer': },
        #         'outputFormatConfiguration': {'serializer': {'parquetSerDe': }},
        #         'schemaConfiguration': {}
        #     },
        #     'prefix': 'streaming'
        # }
        #
        # delivery_stream = CfnDeliveryStream(scope=self,
        #                                     id='Firehose Delivery Stream',
        #                                     delivery_stream_name=delivery_stream_name,
        #                                     delivery_stream_type='DirectPut',
        #                                     extended_s3_destination_configuration=s3_configuration
        #                                     )

        # Define KDA application
        application_configuration = {
            'environmentProperties': {
                'propertyGroups': [
                    {
                        'propertyGroupId': 'ConsumerConfigProperties',
                        'propertyMap': {
                            'CustomerStream': scope.customer_stream.stream_name,
                            'AddressStream': scope.address_stream.stream_name,
                            'SaleStream': scope.sale_stream.stream_name,
                            'PromoDataPath': source_bucket.s3_url_for_object('promo'),
                            'ItemDataPath': source_bucket.s3_url_for_object('item'),
                            'aws.region': scope.region
                        }
                    },
                    {
                        'propertyGroupId': 'ProducerConfigProperties',
                        'propertyMap': {
                            'ElasticsearchHost': 'https://' + es_domain.attr_domain_endpoint + ':443',
                            'Region': scope.region,
                            'DenormalizedSalesS3Path': dest_bucket.s3_url_for_object() + '/',
                            'IndexName': 'ara-write'
                        }
                    }
                ]
            },
            'applicationCodeConfiguration': {
                'codeContent': {
                    's3ContentLocation': {
                        'bucketArn': artifacts_bucket_arn,
                        'fileKey': 'binaries/stream-processing-1.1.jar'
                    }
                },
                'codeContentType': 'ZIPFILE'
            },
            'flinkApplicationConfiguration': {
                'parallelismConfiguration': {
                    'configurationType': 'DEFAULT'
                },
                'checkpointConfiguration': {
                    'configurationType': 'DEFAULT'
                },
                'monitoringConfiguration': {
                    'logLevel': 'DEBUG',
                    'metricsLevel': 'TASK',
                    'configurationType': 'CUSTOM'
                }
            },
            'applicationSnapshotConfiguration': {
                'snapshotsEnabled': False
            }
        }

        self.__app = CfnApplicationV2(scope=self,
                                      id='KDA application',
                                      runtime_environment='FLINK-1_11',
                                      application_name='KDA-application',
                                      service_execution_role=kda_role.role_arn,
                                      application_configuration=application_configuration)

        kda_logging = CfnApplicationCloudWatchLoggingOptionV2(scope=self, id='KDA application logging',
                                                              application_name=self.__app.ref,
                                                              cloud_watch_logging_option={'logStreamArn': log_stream_arn})

        kda_logging.apply_removal_policy(policy=RemovalPolicy.RETAIN, apply_to_update_replace_policy=True,
                                         default=RemovalPolicy.RETAIN)

        # Use a custom resource to start the application
        create_params = {
            'ApplicationName': self.__app.ref,
            'RunConfiguration': {
                'ApplicationRestoreConfiguration': {
                    'ApplicationRestoreType': 'SKIP_RESTORE_FROM_SNAPSHOT'
                },
                'FlinkRunConfiguration': {
                    'AllowNonRestoredState': True
                }
            }
        }

        # See https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/ for service name, actions and parameters
        create_action = AwsSdkCall(service='KinesisAnalyticsV2',
                                   action='startApplication',
                                   parameters=create_params,
                                   physical_resource_id=PhysicalResourceId.of(self.__app.ref + '-start'))

        delete_action = AwsSdkCall(service='KinesisAnalyticsV2',
                                   action='stopApplication',
                                   parameters={'ApplicationName': self.__app.ref, 'Force': True})

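        # on_create starts the application when the stack is deployed; on_delete
        # force-stops it before the application is removed.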
        custom_resource = AwsCustomResource(scope=self,
                                            id='KdaStartAndStop',
                                            on_create=create_action,
                                            on_delete=delete_action,
                                            policy=AwsCustomResourcePolicy.from_statements([PolicyStatement(
                                                actions=['kinesisanalytics:StartApplication',
                                                         'kinesisanalytics:StopApplication',
                                                         'kinesisanalytics:DescribeApplication',
                                                         'kinesisanalytics:UpdateApplication'], resources=[
                                                    stack.format_arn(service='kinesisanalytics', resource='application',
                                                                     resource_name=self.__app.application_name)])]))

        custom_resource.node.add_dependency(self.__app)
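
A minimal usage sketch for Example #30, assuming the enclosing class is named KdaApplicationStack (the class line is cut off in this listing) and that es_domain, kda_role, source_bucket and dest_bucket are already defined elsewhere in the app:

    app = core.App()
    KdaApplicationStack(app, 'KdaApplication',
                        es_domain=es_domain,          # CfnDomain from another stack
                        kda_role=kda_role,            # iam.Role assumed by Kinesis Analytics
                        source_bucket=source_bucket,  # s3.Bucket with promo/item reference data
                        dest_bucket=dest_bucket)      # s3.Bucket for denormalized output
    app.synth()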