def __init__(self, scope: core.Construct, id: str, put_bucket: aws_s3.Bucket, **kwargs) -> None:
    """Create a Lambda function that writes objects into *put_bucket*.

    Args:
        scope: Parent construct.
        id: Logical id of this construct within *scope*.
        put_bucket: Bucket the function is granted write access to; its
            name is passed to the handler via the PUT_BUCKET_NAME
            environment variable.
    """
    super().__init__(scope, id, **kwargs)
    # Lambda function packaged from the local asset directory.
    lambda_ = aws_lambda.Function(
        self,
        "SameResourceLambda",
        # Code.asset() is deprecated in the CDK; from_asset() is the
        # supported equivalent and takes the same path argument.
        code=aws_lambda.Code.from_asset("lambdas/same_resource"),
        runtime=aws_lambda.Runtime.PYTHON_3_8,
        handler="lambda_function.lambda_handler",
        environment={"PUT_BUCKET_NAME": put_bucket.bucket_name},
    )
    # Allow the function to write into the bucket it was given.
    put_bucket.grant_write(lambda_)
def __init__(self, scope, id, cluster: ecs.Cluster, tracks_table: dynamodb.Table, input_bucket: s3.Bucket, output_bucket: s3.Bucket, **kwargs):
    """Provision the queue-processing Fargate service for the separator worker.

    The container image is built from the local ``worker`` directory; the
    task receives the tracks table and output bucket names through
    environment variables, and its task role is granted read on the input
    bucket, write on the output bucket and read/write on the tracks table.
    """
    super().__init__(scope, id, **kwargs)
    # Resolve the worker image directory relative to this source file.
    image_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), 'worker'))
    container_env = {
        'TRACKS_TABLE_NAME': tracks_table.table_name,
        'OUTPUT_BUCKET_NAME': output_bucket.bucket_name,
    }
    self.service = ecs_patterns.QueueProcessingFargateService(
        self,
        'separator-service',
        cluster=cluster,
        cpu=2048,
        memory_limit_mib=8192,
        image=ecs.ContainerImage.from_asset(directory=image_dir),
        environment=container_env,
    )
    # IAM permissions needed by the worker at runtime.
    task_role = self.service.task_definition.task_role
    input_bucket.grant_read(task_role)
    output_bucket.grant_write(task_role)
    tracks_table.grant_read_write_data(task_role)
def __init__(self, scope: core.Construct, id: str, prefix: str, source_bucket: s3.Bucket, dest_bucket: s3.Bucket, **kwargs) -> None:
    """Assemble the streaming module: Kinesis streams, KDA app and Elasticsearch.

    Creates three KMS-encrypted Kinesis streams, a service role for
    Kinesis Data Analytics with read access to the streams and the
    reference-data bucket (plus write access to the destination bucket),
    an Elasticsearch domain, and the KDA application wired to all of them.

    Args:
        scope: Parent construct.
        id: Logical id of this construct within *scope*.
        prefix: Application prefix forwarded to the Elasticsearch domain.
        source_bucket: Bucket holding reference data the KDA app reads.
        dest_bucket: Bucket the KDA app writes results to.
    """
    super().__init__(scope, id, **kwargs)
    # Short unique suffix taken from the stack id (4th '-'-separated part
    # of the ARN's resource segment), used to disambiguate named resources.
    suffix = Fn.select(
        4, Fn.split('-', Fn.select(2, Fn.split('/', self.stack_id))))

    # Customer-managed KMS key (with rotation) for Kinesis Data Streams.
    self.__kms_key = Key(scope=self,
                         id='kms-kinesis',
                         alias='custom/kinesis',
                         description='KMS key for Kinesis Data Streams',
                         enable_key_rotation=True)

    # The three input streams, all encrypted with the key above.
    self.__sale_stream = Stream(scope=self,
                                id="saleStream",
                                stream_name="ara-web-sale",
                                encryption_key=self.__kms_key)
    self.__address_stream = Stream(scope=self,
                                   id="addressStream",
                                   stream_name="ara-web-customer-address",
                                   encryption_key=self.__kms_key)
    self.__customer_stream = Stream(scope=self,
                                    id="customerStream",
                                    stream_name="ara-web-customer",
                                    encryption_key=self.__kms_key)

    # Role assumed by the Kinesis Data Analytics service.
    kda_role = Role(scope=self,
                    id='KinesisAnalyticsRole',
                    assumed_by=ServicePrincipal(
                        service='kinesisanalytics.amazonaws.com'))

    # Read access to every input stream.
    self.__customer_stream.grant_read(kda_role)
    self.__address_stream.grant_read(kda_role)
    self.__sale_stream.grant_read(kda_role)
    # Read access to reference data; write access to the results bucket.
    source_bucket.grant_read(kda_role)
    dest_bucket.grant_write(kda_role)

    # grant_read does not cover ListShards, which KDA needs to enumerate
    # the streams' shards.
    kda_role.add_to_policy(
        PolicyStatement(actions=['kinesis:ListShards'],
                        resources=[
                            self.__customer_stream.stream_arn,
                            self.__address_stream.stream_arn,
                            self.__sale_stream.stream_arn
                        ]))

    # Create Elasticsearch domain
    # TODO: use VPC subnets
    es_domain = EsDomain(scope=self,
                         id='EsDomain',
                         application_prefix=prefix,
                         suffix=suffix,
                         kda_role=kda_role)

    # Instantiated for its side effects only (the previous `kda_app`
    # binding was never used); referencing es_domain.es_domain creates the
    # implicit ordering after the Elasticsearch service.
    KdaApplication(scope=self,
                   id='KdaApplication',
                   es_domain=es_domain.es_domain,
                   kda_role=kda_role,
                   source_bucket=source_bucket,
                   dest_bucket=dest_bucket)

    core.Tags.of(self).add('module-name', 'streaming')