def create_log_group_and_stream(self) -> aws_logs.LogGroup:
    log_group = aws_logs.LogGroup(
        self,
        "integ_test_firehose_delivery_log_group",
        log_group_name=FirehoseStack.LOG_GROUP_NAME,
        removal_policy=core.RemovalPolicy.DESTROY,
        retention=aws_logs.RetentionDays.FIVE_DAYS,
    )
    aws_logs.LogStream(
        self,
        "integ_test_firehose_delivery_log_stream",
        log_group=log_group,
        log_stream_name=FirehoseStack.LOG_STREAM_NAME,
        removal_policy=core.RemovalPolicy.DESTROY,
    )
    return log_group
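# Usage sketch (hedged): FirehoseStack.LOG_GROUP_NAME and LOG_STREAM_NAME are
# assumed to be class constants, so the group and stream created above can be
# referenced by name in a delivery stream's CloudWatch logging options, e.g.:
#   "cloudWatchLoggingOptions": {
#       "enabled": True,
#       "logGroupName": FirehoseStack.LOG_GROUP_NAME,
#       "logStreamName": FirehoseStack.LOG_STREAM_NAME,
#   }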
Example No. 2
    def __init__(self, scope: core.Stack, id: str, vpc, cluster,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        self.vpc = vpc
        self.cluster = cluster

        # Build a custom Docker image for the Jenkins worker.
        self.container_image = ecr.DockerImageAsset(
            self, "JenkinsWorkerDockerImage", directory='./docker/worker/')

        # Security group to connect workers to leader
        self.worker_security_group = ec2.SecurityGroup(
            self,
            "WorkerSecurityGroup",
            vpc=self.vpc,
            description="Jenkins Worker access to Jenkins leader",
        )

        # IAM execution role for the workers to pull from ECR and push to CloudWatch logs
        self.worker_execution_role = iam.Role(
            self,
            "WorkerExecutionRole",
            assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'),
        )

        self.worker_execution_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AmazonECSTaskExecutionRolePolicy'))

        # Task role for worker containers - add to this role any AWS resources that Jenkins requires access to
        self.worker_task_role = iam.Role(
            self,
            "WorkerTaskRole",
            assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'),
        )

        # Create log group for the workers to log to
        self.worker_logs_group = logs.LogGroup(
            self, "WorkerLogGroup", retention=logs.RetentionDays.ONE_DAY)

        # Create log stream for worker log group
        self.worker_log_stream = logs.LogStream(
            self, "WorkerLogStream", log_group=self.worker_logs_group)
Example No. 3
    def __init__(self, scope: Construct, construct_id: str,
                 **kwargs: str) -> None:
        super().__init__(scope, construct_id, **kwargs)

        self.vpc = ec2.Vpc(
            self,
            "aws-data-wrangler-vpc",
            cidr="11.19.224.0/19",
            enable_dns_hostnames=True,
            enable_dns_support=True,
        )
        Tags.of(self.vpc).add("Name", "aws-data-wrangler")
        self.key = kms.Key(
            self,
            id="aws-data-wrangler-key",
            description="Aws Data Wrangler Test Key.",
            policy=iam.PolicyDocument(statements=[
                iam.PolicyStatement(
                    sid="Enable IAM User Permissions",
                    effect=iam.Effect.ALLOW,
                    actions=["kms:*"],
                    principals=[iam.AccountRootPrincipal()],
                    resources=["*"],
                )
            ]),
        )
        kms.Alias(
            self,
            "aws-data-wrangler-key-alias",
            alias_name="alias/aws-data-wrangler-key",
            target_key=self.key,
        )
        self.bucket = s3.Bucket(
            self,
            id="aws-data-wrangler",
            block_public_access=s3.BlockPublicAccess(
                block_public_acls=True,
                block_public_policy=True,
                ignore_public_acls=True,
                restrict_public_buckets=True,
            ),
            lifecycle_rules=[
                s3.LifecycleRule(
                    id="CleaningUp",
                    enabled=True,
                    expiration=Duration.days(1),
                    abort_incomplete_multipart_upload_after=Duration.days(1),
                ),
            ],
            versioned=True,
        )
        glue_db = glue.Database(
            self,
            id="aws_data_wrangler_glue_database",
            database_name="aws_data_wrangler",
            location_uri=f"s3://{self.bucket.bucket_name}",
        )
        log_group = logs.LogGroup(
            self,
            id="aws_data_wrangler_log_group",
            retention=logs.RetentionDays.ONE_MONTH,
        )
        log_stream = logs.LogStream(
            self,
            id="aws_data_wrangler_log_stream",
            log_group=log_group,
        )
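        # No log_stream_name is given, so CloudFormation generates a unique
        # stream name, which is surfaced through the "LogStream" output below.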
        CfnOutput(self, "Region", value=self.region)
        CfnOutput(
            self,
            "VPC",
            value=self.vpc.vpc_id,
            export_name="aws-data-wrangler-base-VPC",
        )
        CfnOutput(
            self,
            "PublicSubnet1",
            value=self.vpc.public_subnets[0].subnet_id,
            export_name="aws-data-wrangler-base-PublicSubnet1",
        )
        CfnOutput(
            self,
            "PublicSubnet2",
            value=self.vpc.public_subnets[1].subnet_id,
            export_name="aws-data-wrangler-base-PublicSubnet2",
        )
        CfnOutput(
            self,
            "PrivateSubnet",
            value=self.vpc.private_subnets[0].subnet_id,
            export_name="aws-data-wrangler-base-PrivateSubnet",
        )
        CfnOutput(
            self,
            "KmsKeyArn",
            value=self.key.key_arn,
            export_name="aws-data-wrangler-base-KmsKeyArn",
        )
        CfnOutput(
            self,
            "BucketName",
            value=self.bucket.bucket_name,
            export_name="aws-data-wrangler-base-BucketName",
        )
        CfnOutput(self, "GlueDatabaseName", value=glue_db.database_name)
        CfnOutput(self, "LogGroupName", value=log_group.log_group_name)
        CfnOutput(self, "LogStream", value=log_stream.log_stream_name)
Example No. 4
    def _build_firehose_delivery_stream(self, *, stack, vpc_db_instance):

        self.kfh_log_group = logs.LogGroup(
            stack,
            "exampledeliverystreamloggroup",
            log_group_name="/aws/kinesisfirehose/exampledeliverystream")

        self.kfh_es_log_stream = logs.LogStream(stack,
                                                "deliverytoeslogstream",
                                                log_stream_name="deliverytoes",
                                                log_group=self.kfh_log_group)

        self.kfh_instance = kfh.CfnDeliveryStream(
            stack,
            'exampledeliverystream',
            delivery_stream_type='DirectPut',
            elasticsearch_destination_configuration={
                "indexName": "webappclickstream",
                "cloudWatchLoggingOptions": {
                    "enabled": True,
                    "logGroupName": self.kfh_log_group.log_group_name,
                    "logStreamName": self.kfh_es_log_stream.log_stream_name
                },
                "roleArn": self.firehose_role.role_arn,
                "s3Configuration": {
                    "bucketArn": self.firehose_bucket.bucket_arn,
                    "roleArn": self.firehose_role.role_arn
                },
                "domainArn": self.elastic_search.attr_arn,
                "vpcConfiguration": {
                    "roleArn":
                    self.firehose_role.role_arn,
                    "securityGroupIds":
                    [self.kfh_security_group.security_group_id],
                    "subnetIds": [
                        vpc_db_instance.vpc.select_subnets(
                            subnet_type=ec2.SubnetType.PRIVATE).subnet_ids[0]
                    ]
                },
                "bufferingHints": {
                    "intervalInSeconds": 60,
                    "sizeInMBs": 1
                },
                # Lambda processing configuration for record transformation
                "processingConfiguration": {
                    "enabled": True,
                    "processors": [{
                        "type": "Lambda",
                        "parameters": [{
                            "parameterName": "LambdaArn",
                            "parameterValue":
                            self.lambda_transform_fn.function_arn
                        }]
                    }]
                }
            })
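        # Note: self.firehose_role, self.firehose_bucket, self.elastic_search,
        # self.kfh_security_group and self.lambda_transform_fn are created by
        # other builder methods of this stack, not shown in this excerpt.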
Example No. 5
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # this could be a parameter as well
        kdaAppName = "kda-flink-application"

        # Our Flink JAR file
        kdaApplicationJar = core.CfnParameter(
            self,
            "kdaApplicationJar",
            type="String",
            description="The name of the Flink JAR File.",
            default="kda-flink-app-1.0-SNAPSHOT.jar")

        bucketNameParm = core.CfnParameter(
            self,
            "bucketNameParm",
            type="String",
            description=
            "The name of the Amazon S3 bucket where Flink JAR File is.")

        inputStream = core.CfnParameter(
            self,
            "inputStream",
            type="String",
            description="The name of the Kinesis data stream for Input.",
            default="ProducerStream")

        outputStream = core.CfnParameter(
            self,
            "outputStream",
            type="String",
            description="The name of the Kinesis Firehose delivery steram.",
            default="AnalyticsOutput")

        region = core.CfnOutput(self, "region", value=self.region)
        account = core.CfnOutput(self, "account", value=self.account)

        # S3 bucket where the JAR file is
        s3Bucket = s3.Bucket.from_bucket_name(
            self, "sourceBucket", bucket_name=bucketNameParm.value_as_string)

        bucketPolicy = iam.PolicyStatement(
            sid="GrantS3Access",
            actions=["s3:*"],
            resources=[s3Bucket.bucket_arn, s3Bucket.bucket_arn + "/*"])

        core.CfnOutput(self, "KDASourceBucketARN", value=s3Bucket.bucket_arn)
        core.CfnOutput(self, "KDASourceBucketName", value=s3Bucket.bucket_name)

        # KDA Role - will be assumed by KDA to load JAR as well as to read from input or write to sink
        kdaIAMRole = iam.Role(
            self,
            "kdaIAMRole",
            assumed_by=iam.ServicePrincipal('kinesisanalytics.amazonaws.com'),
            description="Kinesis Analytics role for application " + kdaAppName)
        kdaIAMRole.add_to_policy(statement=bucketPolicy)

        # input Kinesis Data Stream
        kdsStream = kds.Stream.from_stream_arn(
            self,
            id="kdsinputstream",
            stream_arn="arn:aws:kinesis:" + region.value + ":" +
            account.value + ":stream/" + inputStream.value_as_string)

        # output (sink) Firehose Delivery Stream
        fhDeliveryARN = "arn:aws:firehose:" + region.value + ":" + account.value + ":deliverystream/" + outputStream.value_as_string

        # Logs and CWatch
        logGroup = log.LogGroup(self,
                                kdaAppName + "LogGroup",
                                retention=log.RetentionDays.ONE_DAY)
        core.CfnOutput(self, "LogGroupName", value=logGroup.log_group_name)
        logStream = log.LogStream(self,
                                  kdaAppName + "LogStream",
                                  log_group=logGroup)

        # grant permissions to KDA IAM role
        s3Bucket.grant_read(identity=kdaIAMRole)
        kdsStream.grant_read(grantee=kdaIAMRole)
        cwatch.Metric.grant_put_metric_data(grantee=kdaIAMRole)
        logGroup.grant_write(grantee=kdaIAMRole)

        kdaIAMRole.add_to_policy(
            iam.PolicyStatement(sid="DescribeLog",
                                resources=[
                                    "arn:aws:logs:{0}:{1}:log-group:*".format(
                                        core.Aws.REGION, core.Aws.ACCOUNT_ID)
                                ],
                                actions=["logs:DescribeLog*"]))

        kdaIAMRole.add_to_policy(
            iam.PolicyStatement(sid="FullAccessToJARFile",
                                resources=[
                                    s3Bucket.bucket_arn + "/" +
                                    kdaApplicationJar.value_as_string
                                ],
                                actions=['s3:*']))

        kdaIAMRole.add_to_policy(
            iam.PolicyStatement(sid="WriteToFirehose",
                                resources=[fhDeliveryARN],
                                actions=["firehose:*"]))

        # KDA Flink application Configuration
        snapshots = kda.CfnApplicationV2.ApplicationSnapshotConfigurationProperty(
            snapshots_enabled=False)
        codeContent = kda.CfnApplicationV2.ApplicationCodeConfigurationProperty(
            code_content=kda.CfnApplicationV2.CodeContentProperty(
                s3_content_location=kda.CfnApplicationV2.
                S3ContentLocationProperty(
                    bucket_arn=s3Bucket.bucket_arn,
                    file_key=kdaApplicationJar.value_as_string)),
            code_content_type="ZIPFILE")
        appProperties = kda.CfnApplicationV2.EnvironmentPropertiesProperty(
            property_groups=[
                kda.CfnApplicationV2.PropertyGroupProperty(
                    property_group_id="ConsumerConfigProperties",
                    property_map=({
                        "AWS_REGION": region.value,
                        "INPUT_STREAM": inputStream.value_as_string,
                        "flink.inputstream.initpos": "LATEST"
                    })),
                kda.CfnApplicationV2.PropertyGroupProperty(
                    property_group_id="OutputConfigProperties",
                    property_map=({
                        "AWS_REGION": region.value,
                        "OUTPUT_KDF": outputStream.value_as_string
                    }))
            ])

        appConfig = kda.CfnApplicationV2.ApplicationConfigurationProperty(
            application_code_configuration=codeContent,
            application_snapshot_configuration=snapshots,
            environment_properties=appProperties)
        # call KDA APP stack to create KDA Flink Application
        KdaAppStack(self,
                    "app-stack",
                    iamRole=kdaIAMRole,
                    kdaAppName=kdaAppName,
                    logGroup=logGroup,
                    logStream=logStream,
                    appConfig=appConfig)
Example No. 6
    def __init__(self, scope: core.Stack, id: str, **kwargs):
        super().__init__(scope, id, **kwargs)

        self.output_bucket = aws_s3.Bucket(
            self, "BucketTwitterStreamOutput",
            bucket_name = self.stack_name,
        )

        self.bucket_url = self.output_bucket.bucket_regional_domain_name

        # Because the Kinesis Firehose bindings are direct CloudFormation resources, we have to create the IAM policy/role and attach them on our own
        self.iam_role = aws_iam.Role(
            self, "IAMRoleTwitterStreamKinesisFHToS3",
            role_name="KinesisFirehoseToS3-{}".format(self.stack_name),
            assumed_by=aws_iam.ServicePrincipal(service='firehose.amazonaws.com'),
        )

        # S3 bucket actions
        self.s3_iam_policy_statement = aws_iam.PolicyStatement()
        actions = ["s3:GetBucketLocation", "s3:GetObject", "s3:ListBucket", "s3:ListBucketMultipartUploads", "s3:PutObject"]
        for action in actions:
            self.s3_iam_policy_statement.add_actions(action)
        self.s3_iam_policy_statement.add_resources(self.output_bucket.bucket_arn)
        self.s3_iam_policy_statement.add_resources(self.output_bucket.bucket_arn + "/*")

        # CW error log setup - the delivery streams below reference this group
        # and these streams, which must already exist for error logging to work
        self.s3_error_logs_group = aws_logs.LogGroup(
            self, "S3ErrorLogsGroup",
            log_group_name="{}-s3-errors".format(self.stack_name)
        )

        self.s3_error_logs_stream = aws_logs.LogStream(
            self, "S3ErrorLogsStream",
            log_group=self.s3_error_logs_group,
            log_stream_name='s3Backup'
        )

        self.s3_curator_error_logs_stream = aws_logs.LogStream(
            self, "S3CuratorErrorLogsStream",
            log_group=self.s3_error_logs_group,
            log_stream_name='s3BackupCurator'
        )

        self.firehose = aws_kinesisfirehose.CfnDeliveryStream(
            self, "FirehoseTwitterStream",
            delivery_stream_name = "{}-raw".format(self.stack_name),
            delivery_stream_type = "DirectPut",
            s3_destination_configuration={
                'bucketArn': self.output_bucket.bucket_arn,
                'bufferingHints': {
                    'intervalInSeconds': 120,
                    'sizeInMBs': 10
                },
                'compressionFormat': 'UNCOMPRESSED',
                'roleArn': self.iam_role.role_arn,
                'cloudWatchLoggingOptions': {
                    'enabled': True,
                    'logGroupName': self.s3_error_logs_group.log_group_name,
                    'logStreamName': self.s3_error_logs_stream.log_stream_name
                },
                'prefix': 'twitter-raw/'
            },
        )

        # TODO: Only attach what's needed for this policy, right now I'm lazy and attaching all policies
        self.iam_policy = aws_iam.Policy(
            self, "IAMPolicyTwitterStreamKinesisFHToS3",
            policy_name="KinesisFirehoseToS3-{}".format(self.stack_name),
            statements=[self.s3_iam_policy_statement],
        )

        self.iam_policy.attach_to_role(self.iam_role)

        # Because the Kinesis Firehose bindings are direct CloudFormation resources, we have to create the IAM policy/role and attach them on our own
        self.curator_firehose = aws_kinesisfirehose.CfnDeliveryStream(
            self, "CuratorFirehoseStream",
            delivery_stream_name = "{}-curator".format(self.stack_name),
            delivery_stream_type = "DirectPut",
            s3_destination_configuration={
                'bucketArn': self.output_bucket.bucket_arn,
                'bufferingHints': {
                    'intervalInSeconds': 120,
                    'sizeInMBs': 10
                },
                'compressionFormat': 'UNCOMPRESSED',
                'roleArn': self.iam_role.role_arn,
                'cloudWatchLoggingOptions': {
                    'enabled': True,
                    'logGroupName': self.s3_error_logs_group.log_group_name,
                    'logStreamName': self.s3_curator_error_logs_stream.log_stream_name
                },
                'prefix': 'twitter-curated/'
            },
        )

        def zip_package():
            cwd = os.getcwd()
            file_name = 'curator-lambda.zip'
            zip_file = cwd + '/' + file_name

            os.chdir('src/')
            sh.zip('-r9', zip_file, '.')
            os.chdir(cwd)

            return file_name, zip_file

        _, zip_file = zip_package()

        self.twitter_stream_curator_lambda_function = aws_lambda.Function(
            self, "TwitterStreamCuratorLambdaFunction",
            function_name="{}-curator".format(self.stack_name),
            code=aws_lambda.AssetCode(zip_file),
            handler="sentiment_analysis.lambda_handler",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            tracing=aws_lambda.Tracing.ACTIVE,
            description="Triggers from S3 PUT event for twitter stream data and transorms it to clean json syntax with sentiment analysis attached",
            environment={
                "STACK_NAME": self.stack_name,
                "FIREHOSE_STREAM": self.curator_firehose.delivery_stream_name
            },
            memory_size=128,
            timeout=core.Duration.seconds(120),
            log_retention=aws_logs.RetentionDays.ONE_WEEK,
        )

        # Permission to talk to comprehend for sentiment analysis
        self.comprehend_iam_policy_statement = aws_iam.PolicyStatement()
        self.comprehend_iam_policy_statement.add_actions('comprehend:*')
        self.comprehend_iam_policy_statement.add_all_resources()
        self.twitter_stream_curator_lambda_function.add_to_role_policy(self.comprehend_iam_policy_statement)

        # Permission to put in kinesis firehose
        self.curator_firehose_iam_policy_statement = aws_iam.PolicyStatement()
        self.curator_firehose_iam_policy_statement.add_actions('firehose:Put*')
        self.curator_firehose_iam_policy_statement.add_resources(self.curator_firehose.attr_arn)
        self.twitter_stream_curator_lambda_function.add_to_role_policy(self.curator_firehose_iam_policy_statement)

        # Grant the curator Lambda read access to the output bucket
        self.output_bucket.grant_read(self.twitter_stream_curator_lambda_function)

        self.twitter_stream_curator_lambda_function.add_event_source(
            aws_lambda_event_sources.S3EventSource(
                bucket=self.output_bucket,
                events=[
                    aws_s3.EventType.OBJECT_CREATED
                ],
                filters=[
                    aws_s3.NotificationKeyFilter(
                        prefix="twitter-raw/"
                    )
                ]
            )
        )
Example No. 7
    def __init__(self, scope: core.Construct, id: str, es_domain: CfnDomain, kda_role: iam.Role,
                 source_bucket: s3.Bucket, dest_bucket: s3.Bucket, **kwargs):
        super().__init__(scope, id, **kwargs)

        stack = Stack.of(self)

        kda_role.add_to_policy(PolicyStatement(actions=['cloudwatch:PutMetricData'],
                                               resources=['*']))

        artifacts_bucket_arn = 'arn:aws:s3:::' + _config.ARA_BUCKET.replace("s3://", "")
        kda_role.add_to_policy(PolicyStatement(actions=['s3:GetObject', 's3:GetObjectVersion'],
                                               resources=[artifacts_bucket_arn, artifacts_bucket_arn + '/binaries/*']))
        log_group = logs.LogGroup(scope=self,
                                  id='KdaLogGroup',
                                  retention=logs.RetentionDays.ONE_WEEK,
                                  removal_policy=RemovalPolicy.DESTROY)

        log_stream = logs.LogStream(scope=self,
                                    id='KdaLogStream',
                                    log_group=log_group,
                                    removal_policy=RemovalPolicy.DESTROY)
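
        # logs.LogStream exposes no ARN attribute, so the log-stream ARN is
        # assembled by hand from the group and stream names.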

        log_stream_arn = stack.format_arn(service='logs',
                                          resource='log-group',
                                          resource_name=log_group.log_group_name + ':log-stream:' +
                                                        log_stream.log_stream_name,
                                          sep=':')

        # TODO: restrict
        kda_role.add_to_policy(PolicyStatement(actions=['logs:*'],
                                               resources=[stack.format_arn(service='logs', resource='*')]))

        kda_role.add_to_policy(PolicyStatement(actions=['logs:DescribeLogStreams', 'logs:DescribeLogGroups'],
                                               resources=[log_group.log_group_arn,
                                                          stack.format_arn(service='logs', resource='log-group',
                                                                           resource_name='*')]))

        kda_role.add_to_policy(PolicyStatement(actions=['logs:PutLogEvents'],
                                               resources=[log_stream_arn]))

        kda_role.add_to_policy(PolicyStatement(actions=['es:ESHttp*'],
                                               resources=[stack.format_arn(service='es', resource='domain',
                                                                           resource_name=es_domain.domain_name + '/*')]))

        # TODO: restrict
        kda_role.add_to_policy(PolicyStatement(actions=['s3:*'],
                                               resources=['arn:aws:s3:::*']))

        # Define delivery stream
        # delivery_stream_name = 'clean_delivery_stream'
        #
        # s3_configuration = {
        #     'bucketArn': '',
        #     'compressionFormat': 'Snappy',
        #     'dataFormatConversionConfiguration': {
        #         'enabled': True,
        #         'inputFormatConfiguration': {'deserializer': },
        #         'outputFormatConfiguration': {'serializer': {'parquetSerDe': }},
        #         'schemaConfiguration': {}
        #     },
        #     'prefix': 'streaming'
        # }
        #
        # delivery_stream = CfnDeliveryStream(scope=self,
        #                                     id='Firehose Delivery Stream',
        #                                     delivery_stream_name=delivery_stream_name,
        #                                     delivery_stream_type='DirectPut',
        #                                     extended_s3_destination_configuration=s3_configuration
        #                                     )

        # Define KDA application
        application_configuration = {
            'environmentProperties': {
                'propertyGroups': [
                    {
                        'propertyGroupId': 'ConsumerConfigProperties',
                        'propertyMap': {
                            'CustomerStream': scope.customer_stream.stream_name,
                            'AddressStream': scope.address_stream.stream_name,
                            'SaleStream': scope.sale_stream.stream_name,
                            'PromoDataPath': source_bucket.s3_url_for_object('promo'),
                            'ItemDataPath': source_bucket.s3_url_for_object('item'),
                            'aws.region': scope.region
                        }
                    },
                    {
                        'propertyGroupId': 'ProducerConfigProperties',
                        'propertyMap': {
                            'ElasticsearchHost': 'https://' + es_domain.attr_domain_endpoint + ':443',
                            'Region': scope.region,
                            'DenormalizedSalesS3Path': dest_bucket.s3_url_for_object() + '/',
                            'IndexName': 'ara-write'
                        }
                    }
                ]
            },
            'applicationCodeConfiguration': {
                'codeContent': {
                    's3ContentLocation': {
                        'bucketArn': artifacts_bucket_arn,
                        'fileKey': 'binaries/stream-processing-1.1.jar'
                    }
                },
                'codeContentType': 'ZIPFILE'
            },
            'flinkApplicationConfiguration': {
                'parallelismConfiguration': {
                    'configurationType': 'DEFAULT'
                },
                'checkpointConfiguration': {
                    'configurationType': 'DEFAULT'
                },
                'monitoringConfiguration': {
                    'logLevel': 'DEBUG',
                    'metricsLevel': 'TASK',
                    'configurationType': 'CUSTOM'
                }
            },
            'applicationSnapshotConfiguration': {
                'snapshotsEnabled': False
            }
        }

        self.__app = CfnApplicationV2(scope=self,
                                      id='KDA application',
                                      runtime_environment='FLINK-1_11',
                                      application_name='KDA-application',
                                      service_execution_role=kda_role.role_arn,
                                      application_configuration=application_configuration)

        logging = CfnApplicationCloudWatchLoggingOptionV2(scope=self, id='KDA application logging',
                                                          application_name=self.__app.ref,
                                                          cloud_watch_logging_option={'logStreamArn': log_stream_arn})

        logging.apply_removal_policy(policy=RemovalPolicy.RETAIN, apply_to_update_replace_policy=True,
                                     default=RemovalPolicy.RETAIN)

        # Use a custom resource to start the application
        create_params = {
            'ApplicationName': self.__app.ref,
            'RunConfiguration': {
                'ApplicationRestoreConfiguration': {
                    'ApplicationRestoreType': 'SKIP_RESTORE_FROM_SNAPSHOT'
                },
                'FlinkRunConfiguration': {
                    'AllowNonRestoredState': True
                }
            }
        }

        # See https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/ for service name, actions and parameters
        create_action = AwsSdkCall(service='KinesisAnalyticsV2',
                                   action='startApplication',
                                   parameters=create_params,
                                   physical_resource_id=PhysicalResourceId.of(self.__app.ref + '-start'))

        delete_action = AwsSdkCall(service='KinesisAnalyticsV2',
                                   action='stopApplication',
                                   parameters={'ApplicationName': self.__app.ref, 'Force': True})

        custom_resource = AwsCustomResource(scope=self,
                                            id='KdaStartAndStop',
                                            on_create=create_action,
                                            on_delete=delete_action,
                                            policy=AwsCustomResourcePolicy.from_statements([PolicyStatement(
                                                actions=['kinesisanalytics:StartApplication',
                                                         'kinesisanalytics:StopApplication',
                                                         'kinesisanalytics:DescribeApplication',
                                                         'kinesisanalytics:UpdateApplication'], resources=[
                                                    stack.format_arn(service='kinesisanalytics', resource='application',
                                                                     resource_name=self.__app.application_name)])]))

        custom_resource.node.add_dependency(self.__app)
Example No. 8
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # The code that defines your stack goes here
        vpc_name = self.node.try_get_context('vpc_name')
        vpc = aws_ec2.Vpc.from_lookup(self,
                                      'ExistingVPC',
                                      is_default=True,
                                      vpc_name=vpc_name)

        s3_bucket_name = self.node.try_get_context('s3_bucket_name')
        s3_bucket = s3.Bucket.from_bucket_name(self, 'S3KdaFlinkCodeLocation',
                                               s3_bucket_name)
        s3_path_to_flink_app_code = self.node.try_get_context(
            's3_path_to_flink_app_code')

        KDA_APP_NAME = 'KdaMskReplication'

        kda_exec_role_policy_doc = aws_iam.PolicyDocument()
        kda_exec_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                **{
                    "sid":
                    "ReadCode",
                    "effect":
                    aws_iam.Effect.ALLOW,
                    "resources": [
                        "{}/{}".format(s3_bucket.bucket_arn,
                                       s3_path_to_flink_app_code)
                    ],
                    "actions": ["s3:GetObject", "s3:GetObjectVersion"]
                }))

        kda_exec_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                **{
                    "sid":
                    "ListCloudwatchLogGroups",
                    "effect":
                    aws_iam.Effect.ALLOW,
                    #XXX: The ARN will be formatted as follows:
                    # arn:{partition}:{service}:{region}:{account}:{resource}{sep}{resource-name}
                    "resources": [
                        self.format_arn(
                            service="logs",
                            resource="log-group",
                            resource_name=
                            "/aws/kinesis-analytics/{}:log-stream:*".format(
                                KDA_APP_NAME),
                            arn_format=cdk.ArnFormat.COLON_RESOURCE_NAME)
                    ],
                    "actions": ["logs:DescribeLogGroups"]
                }))

        kda_exec_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                **{
                    "sid":
                    "ListCloudwatchLogStreams",
                    "effect":
                    aws_iam.Effect.ALLOW,
                    #XXX: The ARN will be formatted as follows:
                    # arn:{partition}:{service}:{region}:{account}:{resource}{sep}{resource-name}
                    "resources": [
                        self.format_arn(
                            service="logs",
                            resource="log-group",
                            resource_name=
                            "/aws/kinesis-analytics/{}:log-stream:kinesis-analytics-log-stream"
                            .format(KDA_APP_NAME),
                            arn_format=cdk.ArnFormat.COLON_RESOURCE_NAME)
                    ],
                    "actions": ["logs:DescribeLogStreams"]
                }))

        kda_exec_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                **{
                    "sid":
                    "PutCloudwatchLogs",
                    "effect":
                    aws_iam.Effect.ALLOW,
                    #XXX: The ARN will be formatted as follows:
                    # arn:{partition}:{service}:{region}:{account}:{resource}{sep}{resource-name}
                    "resources": [
                        self.format_arn(
                            service="logs",
                            resource="log-group",
                            resource_name=
                            "/aws/kinesis-analytics/{}:log-stream:kinesis-analytics-log-stream"
                            .format(KDA_APP_NAME),
                            arn_format=cdk.ArnFormat.COLON_RESOURCE_NAME)
                    ],
                    "actions": ["logs:PutLogEvents"]
                }))

        kda_exec_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                **{
                    "sid":
                    "ENIReadWritePermissions",
                    "effect":
                    aws_iam.Effect.ALLOW,
                    "resources": ["*"],
                    "actions": [
                        "ec2:CreateNetworkInterface",
                        "ec2:CreateNetworkInterfacePermission",
                        "ec2:DescribeNetworkInterfaces",
                        "ec2:DeleteNetworkInterface"
                    ]
                }))

        kda_exec_role_policy_name = "kinesis-analytics-service-{kda_app_name}-{region}".format(
            region=cdk.Aws.REGION, kda_app_name=KDA_APP_NAME)

        kda_execution_role = aws_iam.Role(
            self,
            'KdaExecutionRole',
            role_name='kinesis-analytics-{kda_app_name}-{region}'.format(
                region=cdk.Aws.REGION, kda_app_name=KDA_APP_NAME),
            assumed_by=aws_iam.ServicePrincipal(
                'kinesisanalytics.amazonaws.com'),
            path='/service-role/',
            inline_policies={
                kda_exec_role_policy_name: kda_exec_role_policy_doc
            },
            managed_policies=[
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    'AmazonVPCReadOnlyAccess'),
            ])

        kda_flink_code_content = aws_kda_flink.CfnApplicationV2.CodeContentProperty(
            s3_content_location=aws_kda_flink.CfnApplicationV2.
            S3ContentLocationProperty(bucket_arn=s3_bucket.bucket_arn,
                                      file_key=s3_path_to_flink_app_code))

        kda_flink_code_config = aws_kda_flink.CfnApplicationV2.ApplicationCodeConfigurationProperty(
            code_content=kda_flink_code_content, code_content_type='ZIPFILE')

        kda_flink_property_groups = self.node.try_get_context(
            'kda_flink_property_groups')
        _property_groups = [
            aws_kda_flink.CfnApplicationV2.PropertyGroupProperty(**elem)
            for elem in kda_flink_property_groups
        ]
        kda_flink_env_props = aws_kda_flink.CfnApplicationV2.EnvironmentPropertiesProperty(
            property_groups=_property_groups)

        flink_app_config = aws_kda_flink.CfnApplicationV2.FlinkApplicationConfigurationProperty(
            checkpoint_configuration=aws_kda_flink.CfnApplicationV2.
            CheckpointConfigurationProperty(
                configuration_type='CUSTOM',
                checkpointing_enabled=True,
                checkpoint_interval=60000,
                min_pause_between_checkpoints=60000),
            monitoring_configuration=aws_kda_flink.CfnApplicationV2.
            MonitoringConfigurationProperty(configuration_type='CUSTOM',
                                            log_level='INFO',
                                            metrics_level='TASK'),
            parallelism_configuration=aws_kda_flink.CfnApplicationV2.
            ParallelismConfigurationProperty(
                configuration_type='CUSTOM',
                auto_scaling_enabled=False,
                parallelism=1,
                parallelism_per_kpu=1,
            ))

        kda_flink_app_config = aws_kda_flink.CfnApplicationV2.ApplicationConfigurationProperty(
            application_code_configuration=kda_flink_code_config,
            application_snapshot_configuration=aws_kda_flink.CfnApplicationV2.
            ApplicationSnapshotConfigurationProperty(snapshots_enabled=False),
            environment_properties=kda_flink_env_props,
            flink_application_configuration=flink_app_config)

        kda_app = aws_kda_flink.CfnApplicationV2(
            self,
            'KdaMskReplication',
            runtime_environment='FLINK-1_11',
            service_execution_role=kda_execution_role.role_arn,
            application_configuration=kda_flink_app_config,
            application_description=
            'A Kinesis Data Analytics application that reads from one Amazon MSK topic and writes to another',
            application_name=KDA_APP_NAME)

        kda_app_log_group = aws_logs.LogGroup(
            self,
            'KdaMskReplicationLogGroup',
            log_group_name='/aws/kinesis-analytics/{}'.format(KDA_APP_NAME),
            retention=aws_logs.RetentionDays.THREE_DAYS,
            removal_policy=cdk.RemovalPolicy.DESTROY)

        kda_app_log_stream = aws_logs.LogStream(
            self,
            'KdaMskReplicationLogStream',
            log_group=kda_app_log_group,
            log_stream_name='kinesis-analytics-log-stream',
            removal_policy=cdk.RemovalPolicy.DESTROY)

        #XXX: The ARN will be formatted as follows:
        # arn:{partition}:{service}:{region}:{account}:{resource}{sep}{resource-name}
        kda_app_log_stream_arn = self.format_arn(
            service="logs",
            resource="log-group",
            resource_name=
            "/aws/kinesis-analytics/{}:log-stream:kinesis-analytics-log-stream"
            .format(KDA_APP_NAME),
            arn_format=cdk.ArnFormat.COLON_RESOURCE_NAME)

        kda_app_cw_log = aws_kda_flink.CfnApplicationCloudWatchLoggingOptionV2(
            self,
            'KdaMskReplicationCWLog',
            application_name=kda_app.application_name,
            cloud_watch_logging_option=aws_kda_flink.
            CfnApplicationCloudWatchLoggingOptionV2.
            CloudWatchLoggingOptionProperty(
                log_stream_arn=kda_app_log_stream_arn))
        kda_app_cw_log.add_depends_on(kda_app)
Example No. 9
    def __init__(self, scope: core.Construct, id: str, vpc: ec2.IVpc,
                 directory: mad.CfnMicrosoftAD, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        log_group = logs.LogGroup(self,
                                  'LogGroup',
                                  log_group_name='/homenet/vpn',
                                  removal_policy=core.RemovalPolicy.DESTROY,
                                  retention=logs.RetentionDays.ONE_MONTH)

        log_stream = logs.LogStream(self,
                                    'LogStream',
                                    log_group=log_group,
                                    log_stream_name='vpn-connection-logs')
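        # Client VPN connection logging references the group and stream by name
        # (not ARN) via connection_log_options below.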

        security_group = ec2.SecurityGroup(self,
                                           'SecurityGroup',
                                           vpc=vpc,
                                           allow_all_outbound=True,
                                           description='VPN clients')

        endpoint = ec2.CfnClientVpnEndpoint(
            self,
            'VpnEp',
            vpc_id=vpc.vpc_id,
            vpn_port=443,
            self_service_portal='enabled',
            split_tunnel=True,
            client_cidr_block='10.1.8.0/22',
            server_certificate_arn=
            'arn:aws:acm:us-east-1:581361757134:certificate/14e094b5-fd1d-4031-b0cc-4be1b77e5955',
            description='HomeNet vpc:endpoint',
            security_group_ids=[security_group.security_group_id],
            authentication_options=[
                ec2.CfnClientVpnEndpoint.ClientAuthenticationRequestProperty(
                    type='directory-service-authentication',
                    active_directory=ec2.CfnClientVpnEndpoint.
                    DirectoryServiceAuthenticationRequestProperty(
                        directory_id=directory.ref)),
            ],
            connection_log_options=ec2.CfnClientVpnEndpoint.
            ConnectionLogOptionsProperty(
                enabled=True,
                cloudwatch_log_group=log_group.log_group_name,
                cloudwatch_log_stream=log_stream.log_stream_name))

        count = 0
        for net in vpc.select_subnets(subnet_group_name='Vpn-Clients').subnets:
            count += 1
            ec2.CfnClientVpnTargetNetworkAssociation(
                self,
                'Network-' + str(count),
                client_vpn_endpoint_id=endpoint.ref,
                subnet_id=net.subnet_id)

        # ec2.CfnClientVpnTargetNetworkAssociation(self,'NetworkAssociation',
        #   client_vpn_endpoint_id=endpoint.ref,
        #   subnet_id= 'subnet-07f0e80d0ed1c1a27')

        ec2.CfnClientVpnAuthorizationRule(
            self,
            'ClientAuthorization',
            authorize_all_groups=True,
            target_network_cidr='10.0.0.0/8',
            client_vpn_endpoint_id=endpoint.ref,
            description='Allow everyone/everywhere')