Example #1
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        function_name: str,
        handler: str,
        config_bucket: aws_s3.Bucket,
        state_table: aws_dynamodb.Table,
        dependency_layer: aws_lambda.LayerVersion,
        api: aws_apigateway.RestApi,
        endpoint: str,
        # Assumed parameter: the original snippet referenced an undefined
        # code_asset, so it is taken as an argument here.
        code_asset: aws_lambda.Code,
    ) -> None:
        super().__init__(scope, id)
        # Point the function at its config file and state table via env vars.
        environment = {
            'bridge_env': 'PROD',
            'bridge_config': f's3://{config_bucket.bucket_name}/bridge.json',
            'state_dynamodb_table': state_table.table_name,
        }
        self.function = aws_lambda.Function(
            self,
            function_name,
            function_name=function_name,
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            layers=[dependency_layer],
            code=code_asset,
            handler=handler,
            timeout=core.Duration.seconds(30),
            retry_attempts=0,
            environment=environment,
        )
        # Expose the function as a POST endpoint on the shared REST API.
        function_resource = api.root.add_resource(endpoint)
        function_resource.add_method(
            'POST', aws_apigateway.LambdaIntegration(self.function))
        # Least-privilege grants: read-only config, write-only state.
        config_bucket.grant_read(self.function)
        state_table.grant_write_data(self.function)
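The fragment above is the body of a reusable construct; wiring it into a stack might look like the sketch below (the class name and argument values are hypothetical):

    # Hypothetical call site inside a stack's __init__:
    BridgeFunction(
        self,
        'SubmitFunction',
        function_name='bridge-submit',
        handler='app.handler',
        config_bucket=config_bucket,
        state_table=state_table,
        dependency_layer=dependency_layer,
        api=api,
        endpoint='submit',
        code_asset=aws_lambda.Code.from_asset('src'),
    )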
Example #2
    def __init__(
        self,
        scope: Construct,
        stack_id: str,
        *,
        deploy_env: str,
        storage_bucket: aws_s3.Bucket,
        **kwargs: Any,
    ) -> None:
        super().__init__(scope, stack_id, **kwargs)

        # AWS account IDs are 12-digit strings; an int literal risks dropping
        # leading zeros.
        account_principal = aws_iam.AccountPrincipal(account_id="276514628126")
        external_id = {"prod": "koordinates-jAddR"}.get(
            deploy_env, "koordinates-4BnJQ")
        role = aws_iam.Role(
            self,
            "koordinates-read-role",
            role_name=f"koordinates-s3-access-read-{deploy_env}",
            assumed_by=account_principal,  # type: ignore[arg-type]
            external_id=external_id,
            max_session_duration=MAX_SESSION_DURATION,
        )
        storage_bucket.grant_read(role)  # type: ignore[arg-type]

        Tags.of(self).add("ApplicationLayer", "lds")  # type: ignore[arg-type]
Example #3
    def authorize_input_bucket(self,
                               bucket: s3.Bucket,
                               objects_key_pattern: Optional[str] = None):
        # Rehydrated profiles with an immutable instance role are read-only.
        if self._rehydrated and not self._mutable_instance_role:
            raise ReadOnlyEMRProfileError()

        # grant_read returns an iam.Grant; assert_success() raises if the
        # permissions could not be attached to the role.
        bucket.grant_read(self._roles.instance_role,
                          objects_key_pattern).assert_success()
        return self
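Because the method returns self, authorizations can be chained fluently. A minimal call-site sketch, assuming a profile instance named profile and a bucket raw_bucket (both names hypothetical):

    # Restrict the read grant to a single key prefix.
    profile.authorize_input_bucket(raw_bucket, objects_key_pattern='raw/*')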
Example #4
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # configure S3 origin...

        # Private bucket: block all public access; CloudFront reads through
        # an origin access identity instead.
        bucket = Bucket(self,
                        "bucket",
                        block_public_access=BlockPublicAccess.BLOCK_ALL,
                        bucket_name=environ.get("BUCKET_NAME"))

        identity = OriginAccessIdentity(self,
                                        "cloudFrontIAMUser",
                                        comment="cloud front identity")

        # Let the origin access identity fetch objects from the bucket.
        bucket.grant_read(identity)

        apply_removal_policy(bucket)

        default_behavior = Behavior(
            is_default_behavior=True,
            lambda_function_associations=[
                LambdaFunctionAssociation(
                    lambda_function=self.init_lambda(),
                    event_type=LambdaEdgeEventType.VIEWER_REQUEST)
            ])

        source_config = SourceConfiguration(
            s3_origin_source=S3OriginConfig(s3_bucket_source=bucket,
                                            origin_access_identity=identity),
            behaviors=[default_behavior],
        )

        # Attach a viewer certificate only when a custom domain is configured.
        cert = None
        domain_name = environ.get("DOMAIN_NAME", None)

        if domain_name is not None:
            cert = ViewerCertificate.from_acm_certificate(
                self.init_certificate(domain_name),
                aliases=[domain_name],
                security_policy=SecurityPolicyProtocol.TLS_V1_2_2018)

        distribution = CloudFrontWebDistribution(self,
                                                 "CloudFront",
                                                 origin_configs=[
                                                     source_config,
                                                 ],
                                                 viewer_certificate=cert)
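The apply_removal_policy helper is not shown in the snippet; a minimal sketch of such a helper, assuming the bucket should be destroyed together with the stack (suitable for dev/test only):

    from aws_cdk.core import RemovalPolicy

    def apply_removal_policy(bucket: Bucket) -> None:
        # Hypothetical helper: delete the bucket when the stack is torn down.
        bucket.apply_removal_policy(RemovalPolicy.DESTROY)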
Example #5
    def __init__(self, scope: Construct, stack_id: str, *, env_name: str,
                 storage_bucket: aws_s3.Bucket) -> None:
        super().__init__(scope, stack_id)

        # AWS account IDs are strings; see the note in Example #2.
        account_principal = aws_iam.AccountPrincipal(account_id="276514628126")
        external_id = {
            PRODUCTION_ENVIRONMENT_NAME: "koordinates-jAddR"
        }.get(env_name, "koordinates-4BnJQ")
        role = aws_iam.Role(
            self,
            "koordinates-read-role",
            role_name=f"koordinates-s3-access-read-{env_name}",
            assumed_by=account_principal,  # type: ignore[arg-type]
            external_id=external_id,
            max_session_duration=MAX_SESSION_DURATION,
        )
        storage_bucket.grant_read(role)  # type: ignore[arg-type]

        Tags.of(self).add("ApplicationLayer", "lds")  # type: ignore[arg-type]
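On the consuming side, the trusted account assumes the role with the matching external ID. A minimal boto3 sketch (the role ARN and session name are hypothetical):

    import boto3

    sts = boto3.client('sts')
    # ExternalId must match the value configured on the role above.
    credentials = sts.assume_role(
        RoleArn='arn:aws:iam::123456789012:role/koordinates-s3-access-read-prod',
        RoleSessionName='koordinates-read',
        ExternalId='koordinates-jAddR',
    )['Credentials']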
Example #6
    def __init__(self, scope, id, cluster: ecs.Cluster,
                 tracks_table: dynamodb.Table, input_bucket: s3.Bucket,
                 output_bucket: s3.Bucket, **kwargs):
        super().__init__(scope, id, **kwargs)

        worker_dir = os.path.abspath(
            os.path.join(os.path.dirname(__file__), 'worker'))

        self.service = ecs_patterns.QueueProcessingFargateService(
            self,
            'separator-service',
            cluster=cluster,
            cpu=2048,
            memory_limit_mib=8192,
            image=ecs.ContainerImage.from_asset(directory=worker_dir),
            environment={
                'TRACKS_TABLE_NAME': tracks_table.table_name,
                'OUTPUT_BUCKET_NAME': output_bucket.bucket_name
            })

        # The worker needs read on inputs, write on outputs, and full access
        # to the tracks state table.
        input_bucket.grant_read(self.service.task_definition.task_role)
        output_bucket.grant_write(self.service.task_definition.task_role)
        tracks_table.grant_read_write_data(
            self.service.task_definition.task_role)
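QueueProcessingFargateService provisions its own SQS queue and already grants the task role permission to consume from it; producers still need send rights. A minimal sketch (the producer function is hypothetical):

        # Allow an upstream producer to enqueue work for the service.
        self.service.sqs_queue.grant_send_messages(producer_function)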
Example #7

    def __init__(self, scope: core.Construct, id: str, prefix: str,
                 source_bucket: s3.Bucket, dest_bucket: s3.Bucket,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Derive a short unique suffix from the generated stack ID.
        suffix = Fn.select(
            4, Fn.split('-', Fn.select(2, Fn.split('/', self.stack_id))))

        # KMS key for Kinesis Data Streams
        self.__kms_key = Key(scope=self,
                             id='kms-kinesis',
                             alias='custom/kinesis',
                             description='KMS key for Kinesis Data Streams',
                             enable_key_rotation=True)

        # Create Kinesis streams
        self.__sale_stream = Stream(scope=self,
                                    id="saleStream",
                                    stream_name="ara-web-sale",
                                    encryption_key=self.__kms_key)
        self.__address_stream = Stream(scope=self,
                                       id="addressStream",
                                       stream_name="ara-web-customer-address",
                                       encryption_key=self.__kms_key)
        self.__customer_stream = Stream(scope=self,
                                        id="customerStream",
                                        stream_name="ara-web-customer",
                                        encryption_key=self.__kms_key)

        # Role for the KDA service
        kda_role = Role(scope=self,
                        id='KinesisAnalyticsRole',
                        assumed_by=ServicePrincipal(
                            service='kinesisanalytics.amazonaws.com'))

        # Grant read on Kinesis streams
        self.__customer_stream.grant_read(kda_role)
        self.__address_stream.grant_read(kda_role)
        self.__sale_stream.grant_read(kda_role)

        # Grant read on source bucket (reference data)
        source_bucket.grant_read(kda_role)
        # Grant write on destination bucket
        dest_bucket.grant_write(kda_role)

        kda_role.add_to_policy(
            PolicyStatement(actions=['kinesis:ListShards'],
                            resources=[
                                self.__customer_stream.stream_arn,
                                self.__address_stream.stream_arn,
                                self.__sale_stream.stream_arn
                            ]))

        # Create Elasticsearch domain
        # TODO: use VPC subnets
        es_domain = EsDomain(scope=self,
                             id='EsDomain',
                             application_prefix=prefix,
                             suffix=suffix,
                             kda_role=kda_role)

        # Create the KDA application after the Elasticsearch service
        kda_app = KdaApplication(scope=self,
                                 id='KdaApplication',
                                 es_domain=es_domain.es_domain,
                                 kda_role=kda_role,
                                 source_bucket=source_bucket,
                                 dest_bucket=dest_bucket)

        core.Tags.of(self).add('module-name', 'streaming')
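Because the streams are encrypted with a customer-managed KMS key, grant_read on each stream also grants decrypt on that key, so kda_role needs no separate key grant. The manual equivalent would be something like:

        # Redundant here; shown only for comparison with grant_read's behavior.
        self.__kms_key.grant_decrypt(kda_role)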