Example #1
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        vpc = ec2.Vpc(self, "EKS_Kafka_PocClusterVPC")

        private_subnets = [subnet.subnet_id for subnet in vpc.private_subnets]
        bngi = msk.CfnCluster.BrokerNodeGroupInfoProperty(instance_type="kafka.m5.large",
                                                          client_subnets=private_subnets)

        msk_cluster = msk.CfnCluster(self, "EKS_KafkaPocMSKCluster",
                                     broker_node_group_info=bngi,
                                     cluster_name="EKSKafkaPOCMSKCluster",
                                     kafka_version="2.3.1",
                                     number_of_broker_nodes=3)

        eks_admin_role = iam.Role(self, "EKS_Kafka_PocCluster-AdminRole",
                                  assumed_by=iam.AccountPrincipal(account_id=self.account))

        eks_cluster = eks.Cluster(self, "EKS_Kafka_PocEKSCluster",
                                  cluster_name="EKS_Kafka_PocCluster",
                                  masters_role=eks_admin_role,
                                  kubectl_enabled=True,
                                  version="1.15",
                                  vpc=vpc)
        eks_cluster.add_capacity("worker", instance_type=ec2.InstanceType("t3.large"),
                                 min_capacity=1, max_capacity=10)
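
This example attaches no security_groups to the broker node group, so the brokers fall back to the VPC default security group. A minimal sketch of a dedicated broker group opened to the VPC CIDR (the names and the use of the standard Kafka listener ports 9092/9094 are illustrative, not part of the original):

        # hypothetical: dedicated broker security group admitting in-VPC clients
        msk_sg = ec2.SecurityGroup(self, "MskBrokerSG", vpc=vpc)
        msk_sg.add_ingress_rule(peer=ec2.Peer.ipv4(vpc.vpc_cidr_block),
                                connection=ec2.Port.tcp(9092))  # plaintext listener
        msk_sg.add_ingress_rule(peer=ec2.Peer.ipv4(vpc.vpc_cidr_block),
                                connection=ec2.Port.tcp(9094))  # TLS listener
        bngi = msk.CfnCluster.BrokerNodeGroupInfoProperty(
            instance_type="kafka.m5.large",
            client_subnets=private_subnets,
            security_groups=[msk_sg.security_group_id])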
Example #2
  def __init__(self, scope: core.Construct, id: str, VPC: ec2.Vpc, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # broker sizing comes from the "kafka" block of the CDK context
    kafka_context = self.node.try_get_context("kafka")

    self.cluster = msk.CfnCluster(
      self,
      "statement_kafka",
      cluster_name="statement-demo",
      broker_node_group_info={
        "clientSubnets": [subnet.subnet_id for subnet in VPC.private_subnets],
        "instanceType": kafka_context["instanceType"],
        "storageInfo": {
          "ebsStorageInfo": {
            "volumeSize": kafka_context["volumeSize"]
          }
        }
      },
      kafka_version="2.5.1",
      number_of_broker_nodes=kafka_context["number_of_broker_nodes"],
      encryption_info={
        "encryptionInTransit": {
          "clientBroker": "TLS"
        }
      },
      enhanced_monitoring="PER_TOPIC_PER_BROKER"
    )
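
A plausible cdk.json context block that would satisfy the try_get_context("kafka") lookups above (the values are illustrative):

{
  "context": {
    "kafka": {
      "instanceType": "kafka.m5.large",
      "volumeSize": 100,
      "number_of_broker_nodes": 3
    }
  }
}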
Example #3
    def __init__(self, scope: core.Construct, id: str,
                 e2e_security_group: aws_ec2.SecurityGroup, vpc: aws_ec2.Vpc,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # DEFAULT_KAFKA_PORT is assumed to be defined at module level
        # (9092, the plaintext Kafka listener)
        cluster_sec_group = aws_ec2.SecurityGroup(self,
                                                  'msk-cluster-sec-group',
                                                  vpc=vpc)
        cluster_sec_group.add_ingress_rule(peer=e2e_security_group,
                                           connection=aws_ec2.Port(
                                               string_representation='kafka',
                                               protocol=aws_ec2.Protocol.TCP,
                                               from_port=DEFAULT_KAFKA_PORT,
                                               to_port=DEFAULT_KAFKA_PORT))

        cluster = aws_msk.CfnCluster(
            self,
            'msk-cluster',
            cluster_name='cdk-test',
            # the broker count must be a multiple of the client subnet count;
            # one broker per subnet is the minimum
            number_of_broker_nodes=len(vpc.private_subnets),
            kafka_version='2.3.1',
            broker_node_group_info=aws_msk.CfnCluster.BrokerNodeGroupInfoProperty(
                instance_type="kafka.m5.large",
                client_subnets=[
                    subnet.subnet_id for subnet in vpc.private_subnets
                ],
                security_groups=[cluster_sec_group.security_group_id]))

        # ref resolves to the cluster ARN for AWS::MSK::Cluster
        core.CfnOutput(self, "arn", value=cluster.ref)
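
The output above exports only the cluster ARN; the broker endpoints have to be fetched through the kafka:GetBootstrapBrokers API. A sketch of surfacing them with an SDK-call custom resource, assuming aws_cdk.custom_resources is imported as cr (not part of the original):

        brokers = cr.AwsCustomResource(
            self,
            'BootstrapBrokers',
            on_create=cr.AwsSdkCall(
                service='Kafka',
                action='getBootstrapBrokers',
                parameters={'ClusterArn': cluster.ref},
                physical_resource_id=cr.PhysicalResourceId.of('BootstrapBrokers')),
            policy=cr.AwsCustomResourcePolicy.from_sdk_calls(
                resources=cr.AwsCustomResourcePolicy.ANY_RESOURCE))
        # TLS is the default client_broker setting, so export the TLS endpoints
        core.CfnOutput(self, 'bootstrap-brokers',
                       value=brokers.get_response_field('BootstrapBrokerStringTls'))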
Example #4
    def __init__(
        self,
        scope: cdk.Construct,
        construct_id: str,
        stack_log_level,
        vpc,
        **kwargs
    ) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Create Security Group for Managed Kafka Instance
        self.kafka_client_sg = _ec2.SecurityGroup(
            self,
            "kafkaClientSG",
            vpc=vpc,
            description="kafka client security group",
            allow_all_outbound=True,
        )
        cdk.Tags.of(self.kafka_client_sg).add("Name", "kafka_client_sg")

        self.kafka_cluster_sg = _ec2.SecurityGroup(
            self,
            "kafkaSG",
            vpc=vpc,
            security_group_name=f"kafka_sg_{construct_id}",
            description="Security Group for Kafka Cluster"
        )

        # https://docs.aws.amazon.com/msk/latest/developerguide/troubleshooting.html#networking-trouble
        self.kafka_cluster_sg.add_ingress_rule(
            peer=_ec2.Peer.ipv4(vpc.vpc_cidr_block),
            connection=_ec2.Port.tcp(443),
            description="Allow incoming HTTPS traffic from within VPC"
        )
        self.kafka_cluster_sg.add_ingress_rule(
            peer=_ec2.Peer.ipv4(vpc.vpc_cidr_block),
            connection=_ec2.Port.tcp(2181),
            description="Allow incoming ZooKeeper traffic from within VPC"
        )
        self.kafka_cluster_sg.add_ingress_rule(
            peer=_ec2.Peer.ipv4(vpc.vpc_cidr_block),
            connection=_ec2.Port.tcp(9092),
            description="Allow incoming plaintext traffic from within VPC"
        )
        self.kafka_cluster_sg.add_ingress_rule(
            peer=_ec2.Peer.ipv4(vpc.vpc_cidr_block),
            connection=_ec2.Port.tcp(9094),
            description="Allow incoming TLS-encrypted traffic from within VPC"
        )
        cdk.Tags.of(self.kafka_cluster_sg).add("Name", "kafka_cluster_sg")

        # ALLOW kafka clients to connect to kafka cluster sg
        self.kafka_cluster_sg.add_ingress_rule(
            peer=self.kafka_client_sg,
            connection=_ec2.Port.all_tcp(),
            description="ALLOW kafka clients to connect to kafka cluster sg"
        )
        # ALLOW kafka Lambda Consumer to connect to kafka cluster sg
        # https://docs.aws.amazon.com/lambda/latest/dg/services-msk-topic-add.html
        self.kafka_client_sg.add_ingress_rule(
            peer=_ec2.Peer.ipv4(vpc.vpc_cidr_block),
            connection=_ec2.Port.all_tcp(),
            description="ALLOW kafka Lambda consumer to poll kafka cluster"
        )

        # Add your stack resources below
        self.msk_cluster = _msk.CfnCluster(
            self,
            "managedKafka01",
            broker_node_group_info=_msk.CfnCluster.BrokerNodeGroupInfoProperty(
                instance_type="kafka.t3.small",
                client_subnets=vpc.select_subnets(
                    subnet_type=_ec2.SubnetType.PRIVATE
                ).subnet_ids,
                security_groups=[self.kafka_cluster_sg.security_group_id]
            ),
            cluster_name="miztiik-msk-cluster-01",
            kafka_version="2.3.1",
            number_of_broker_nodes=2
        )

        ###########################################
        ################# OUTPUTS #################
        ###########################################
        output_0 = cdk.CfnOutput(
            self,
            "AutomationFrom",
            value=f"{GlobalArgs.SOURCE_INFO}",
            description="To know more about this automation stack, check out our github page."
        )

        output_1 = cdk.CfnOutput(
            self,
            "SalesEventsKafkaRouter",
            value=f"https://console.aws.amazon.com/msk/home?region={cdk.Aws.REGION}#/clusters/{self.msk_cluster.cluster_name}",
            description="Sales events Kafka Cluster"
        )
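
The client security group rules above already anticipate a Lambda consumer. A hedged wiring sketch, assuming an existing function consumer_fn, a hypothetical "sales-events" topic, and aws_cdk.aws_lambda / aws_cdk.aws_lambda_event_sources imported as _lambda / _evt:

        consumer_fn.add_event_source(
            _evt.ManagedKafkaEventSource(
                cluster_arn=self.msk_cluster.ref,
                topic="sales-events",  # assumption
                starting_position=_lambda.StartingPosition.TRIM_HORIZON,
                batch_size=100))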
Example #5
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # VPC ("prefix" and "dirname" are assumed to be defined at module level)
        vpc = ec2.Vpc(
            self,
            f"{prefix}_vpc",
            nat_gateways=1,
            enable_dns_hostnames=True,
            enable_dns_support=True,
            max_azs=2,
            subnet_configuration=[
                ec2.SubnetConfiguration(name="public",
                                        subnet_type=ec2.SubnetType.PUBLIC),
                ec2.SubnetConfiguration(name="private",
                                        subnet_type=ec2.SubnetType.PRIVATE)
            ])

        # MSK Cluster Security Group
        sg_msk = ec2.SecurityGroup(self,
                                   f"{prefix}_sg",
                                   vpc=vpc,
                                   allow_all_outbound=True,
                                   security_group_name=f"{prefix}_sg_msk")
        for subnet in vpc.public_subnets:
            sg_msk.add_ingress_rule(ec2.Peer.ipv4(subnet.ipv4_cidr_block),
                                    ec2.Port.tcp(2181), "Zookeeper Plaintext")
            sg_msk.add_ingress_rule(ec2.Peer.ipv4(subnet.ipv4_cidr_block),
                                    ec2.Port.tcp(2182), "Zookeeper TLS")
            sg_msk.add_ingress_rule(ec2.Peer.ipv4(subnet.ipv4_cidr_block),
                                    ec2.Port.tcp(9092), "Broker Plaintext")
            sg_msk.add_ingress_rule(ec2.Peer.ipv4(subnet.ipv4_cidr_block),
                                    ec2.Port.tcp(9094), "Broker TLS")
        for subnet in vpc.private_subnets:
            sg_msk.add_ingress_rule(ec2.Peer.ipv4(subnet.ipv4_cidr_block),
                                    ec2.Port.all_traffic(),
                                    "All private traffic")

        # MSK Cluster
        msk.CfnCluster(
            self,
            f"{prefix}_kafka_cluster",
            cluster_name="msk-quickstart",
            number_of_broker_nodes=len(vpc.private_subnets),
            kafka_version="2.6.0",
            encryption_info=msk.CfnCluster.EncryptionInfoProperty(
                encryption_in_transit=msk.CfnCluster.EncryptionInTransitProperty(
                    client_broker="TLS_PLAINTEXT")),
            broker_node_group_info=msk.CfnCluster.BrokerNodeGroupInfoProperty(
                instance_type="kafka.m5.large",
                client_subnets=[
                    subnet.subnet_id for subnet in vpc.private_subnets
                ],
                security_groups=[sg_msk.security_group_id],
                storage_info=msk.CfnCluster.StorageInfoProperty(
                    ebs_storage_info=msk.CfnCluster.EBSStorageInfoProperty(
                        volume_size=200))))

        # EC2 Client AMI
        amazon_linux = ec2.MachineImage.latest_amazon_linux(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=ec2.AmazonLinuxEdition.STANDARD,
            virtualization=ec2.AmazonLinuxVirt.HVM,
            storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE)

        # Instance Role and SSM Managed Policy
        role = iam.Role(self,
                        f"{prefix}_ssm_role",
                        assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"))
        role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AmazonEC2RoleforSSM"))
        role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonMSKReadOnlyAccess"))

        # EC2 Client Instance
        instance = ec2.Instance(
            self,
            f"{prefix}_instance",
            instance_type=ec2.InstanceType("m5.large"),
            machine_image=amazon_linux,
            vpc=vpc,
            role=role,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC))

        # Bootstrap script in S3 as Asset
        asset_bootstrap = Asset(self,
                                f"{prefix}_bootstrap",
                                path=os.path.join(dirname, "configure.sh"))
        local_bootstrap_path = instance.user_data.add_s3_download_command(
            bucket=asset_bootstrap.bucket,
            bucket_key=asset_bootstrap.s3_object_key)

        # Loader project in S3 Asset
        asset_loader = Asset(self,
                             f"{prefix}_loader",
                             path=os.path.join(dirname, "earthquake_loader"))
        instance.user_data.add_s3_download_command(
            bucket=asset_loader.bucket,
            bucket_key=asset_loader.s3_object_key,
            local_file="earthquake_loader.zip")

        # Userdata executes bootstrap script from S3
        instance.user_data.add_execute_file_command(
            file_path=local_bootstrap_path)

        # Grant read permissions to assets
        asset_bootstrap.grant_read(instance.role)
        asset_loader.grant_read(instance.role)
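
Because the instance role carries an SSM managed policy, the client instance can be reached without SSH, assuming the attached policy covers Session Manager; a usage sketch (the instance id is a placeholder):

    aws ssm start-session --target i-0123456789abcdef0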
Example #6
    def __init__(self, scope: core.Construct, id: str, subnets: List[str],
                 vpc: ec2.IVpc, default_vpc_cidr_block: str, project: str,
                 namespace: cloudmap.HttpNamespace, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # firewall for load balancers
        self.lbFirewall = ec2.SecurityGroup(
            scope=self,
            id='LbFirewall',
            vpc=vpc,
            description='Load balancer firewall')
        self.lbFirewall.add_ingress_rule(peer=ec2.Peer.ipv4(
            vpc.vpc_cidr_block),
                                         connection=ec2.Port.all_traffic())
        self.lbFirewall.add_ingress_rule(
            peer=ec2.Peer.ipv4(default_vpc_cidr_block),
            connection=ec2.Port.all_traffic())

        # unified client firewall for both MSK and DocumentDB
        self.unifiedClientFirewall = ec2.SecurityGroup(
            scope=self,
            id='UnifiedClientFirewall',
            vpc=vpc,
            description='Client access firewall for DocumentDB and MSK')
        self.unifiedClientFirewall.add_ingress_rule(
            peer=self.lbFirewall, connection=ec2.Port.all_traffic())

        # DocumentDB cluster
        projTag = core.CfnTag(key='Project', value=project)
        subnetGroup = docdb.CfnDBSubnetGroup(
            scope=self,
            id='DatabaseSubnetGroup',
            db_subnet_group_description='Subnet group for database',
            subnet_ids=subnets,
            tags=[projTag,
                  core.CfnTag(key='Name', value='DocDbSubnetGroup')])

        self.clientFirewall = ec2.SecurityGroup(
            scope=self,
            id='DatabaseClientFirewall',
            vpc=vpc,
            description='Client access firewall for DocumentDB')
        self.dbFirewall = ec2.SecurityGroup(
            scope=self,
            id='DatabaseInternalFirewall',
            vpc=vpc,
            allow_all_outbound=True,
            description='Firewall for DocumentDB')
        self.dbFirewall.add_ingress_rule(peer=self.clientFirewall,
                                         connection=ec2.Port.all_traffic())
        self.dbFirewall.add_ingress_rule(peer=self.unifiedClientFirewall,
                                         connection=ec2.Port.all_traffic())
        self.dbFirewall.add_ingress_rule(
            peer=ec2.Peer.ipv4(default_vpc_cidr_block),
            connection=ec2.Port.all_traffic())
        self.docdbCluster = docdb.CfnDBCluster(
            scope=self,
            id='DataStore',
            db_subnet_group_name=subnetGroup.ref,
            master_username='******',
            master_user_password='******',
            vpc_security_group_ids=[self.dbFirewall.security_group_id])
        self.docdbInstances = [
            docdb.CfnDBInstance(scope=self,
                                id="DataStore-Instance-{0}".format(str(i)),
                                db_cluster_identifier=self.docdbCluster.ref,
                                db_instance_class='db.r5.xlarge')
            for i in range(3)
        ]
        self.docdbCloudMap = namespace.create_service(id='DbSvc')
        self.docdbCloudMap.register_non_ip_instance(
            id='dbEndpoint',
            custom_attributes={
                'endpoint': self.docdbCluster.attr_endpoint,
                'user': '******',
                'password': '******'
            })
        self.docdbCloudMap.register_non_ip_instance(
            id='dbReadEndpoint',
            custom_attributes={
                'endpoint': self.docdbCluster.attr_read_endpoint
            })

        # MSK cluster
        self.kafkaClientFirewall = ec2.SecurityGroup(
            scope=self,
            id='KafkaClientFirewall',
            vpc=vpc,
            description='Client access firewall for Kafka')
        self.kafkaFirewall = ec2.SecurityGroup(
            scope=self,
            id='KafkaInternalFirewall',
            vpc=vpc,
            allow_all_outbound=True,
            description='Firewall for Kafka')
        self.kafkaFirewall.add_ingress_rule(peer=self.kafkaClientFirewall,
                                            connection=ec2.Port.all_traffic())
        self.kafkaFirewall.add_ingress_rule(peer=self.unifiedClientFirewall,
                                            connection=ec2.Port.all_traffic())
        self.kafkaFirewall.add_ingress_rule(
            peer=ec2.Peer.ipv4(default_vpc_cidr_block),
            connection=ec2.Port.all_traffic())
        self.kafkaFirewall.add_ingress_rule(peer=self.kafkaFirewall,
                                            connection=ec2.Port.all_traffic())
        # MSK requires the broker count to be a multiple of the subnet count;
        # with fewer than three subnets, place two brokers per subnet
        num_brokers = len(subnets)
        if num_brokers < 3:
            num_brokers = 2 * num_brokers
        self.kafka = msk.CfnCluster(
            scope=self,
            id='kafka',
            cluster_name='kafkafargateworkshop',
            kafka_version='2.2.1',
            number_of_broker_nodes=num_brokers,
            enhanced_monitoring='PER_TOPIC_PER_BROKER',
            broker_node_group_info=msk.CfnCluster.BrokerNodeGroupInfoProperty(
                client_subnets=subnets,
                instance_type='kafka.m5.large',
                security_groups=[self.kafkaFirewall.security_group_id]))
        self.kafkaCloudMap = namespace.create_service(id='KafkaSvc')
        self.kafkaCloudMap.register_non_ip_instance(
            id='KafkaBrokerArn',
            custom_attributes={'broker_arn': self.kafka.ref})

        # ECR
        self.docker_repo = ecr.Repository(scope=self,
                                          id="FargateImageRepository")
        ssm.StringParameter(scope=self,
                            id='SSMParamRegion',
                            string_value=self.region,
                            parameter_name='region')
        ssm.StringParameter(scope=self,
                            id='SSMParamRepoUri',
                            string_value=self.docker_repo.repository_uri,
                            parameter_name='repo_uri')
        self.docker_repo_s3sink = ecr.Repository(
            scope=self, id="FargateImageRepositoryS3Sink")
        self.docker_repo_s3sinkhc = ecr.Repository(
            scope=self, id="FargateImageRepositoryS3SinkHC")
        ssm.StringParameter(
            scope=self,
            id='SSMParamRepoUriS3Sink',
            string_value=self.docker_repo_s3sink.repository_uri,
            parameter_name='repo_uri_s3_sink')
        ssm.StringParameter(
            scope=self,
            id='SSMParamRepoUriS3SinkHC',
            string_value=self.docker_repo_s3sinkhc.repository_uri,
            parameter_name='repo_uri_s3_sink_hc')
        self.docker_repo_sftp = ecr.Repository(scope=self,
                                               id="FargateImageRepositorySftp")
        ssm.StringParameter(scope=self,
                            id='SSMParamRepoUriSftp',
                            string_value=self.docker_repo_sftp.repository_uri,
                            parameter_name='repo_uri_sftp')
        self.docker_repo_batch = ecr.Repository(
            scope=self, id="FargateImageRepositoryBatch")
        ssm.StringParameter(scope=self,
                            id='SSMParamRepoUriBatch',
                            string_value=self.docker_repo_batch.repository_uri,
                            parameter_name='repo_uri_batch')

        # SFTP server
        self.sftpBucket = s3.Bucket(
            scope=self,
            id="SFTPBucket",
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
            encryption=s3.BucketEncryption.S3_MANAGED)
        core.CfnOutput(scope=self,
                       id="SFTPBucketName",
                       value=self.sftpBucket.bucket_name)
        self.sftp_role = iam.Role(
            scope=self,
            id="SFTPRole",
            assumed_by=iam.ServicePrincipal("transfer.amazonaws.com"))
        self.sftp_role.add_to_policy(
            statement=iam.PolicyStatement(resources=[
                self.sftpBucket.bucket_arn, self.sftpBucket.bucket_arn + '/*'
            ],
                                          actions=['s3:*']))
        self.sftp_vpce = vpc.add_interface_endpoint(
            id="SftpEndpoint",
            service=ec2.InterfaceVpcEndpointAwsService.TRANSFER)
        self.sftp_vpce.connections.allow_default_port_from(
            other=ec2.Peer.ipv4(vpc.vpc_cidr_block))
        self.sftp_vpce.connections.allow_default_port_from(
            other=ec2.Peer.ipv4(default_vpc_cidr_block))
        self.sftp_vpce.connections.allow_from(other=ec2.Peer.ipv4(
            vpc.vpc_cidr_block),
                                              port_range=ec2.Port.tcp(22))
        self.sftp_vpce.connections.allow_from(
            other=ec2.Peer.ipv4(default_vpc_cidr_block),
            port_range=ec2.Port.tcp(22))
        self.sftp = transfer.CfnServer(
            scope=self,
            id="SFTP",
            endpoint_type='VPC_ENDPOINT',
            endpoint_details=transfer.CfnServer.EndpointDetailsProperty(
                vpc_endpoint_id=self.sftp_vpce.vpc_endpoint_id),
            identity_provider_type='SERVICE_MANAGED')
        self.sftp_user = transfer.CfnUser(scope=self,
                                          id="SFTPUser",
                                          role=self.sftp_role.role_arn,
                                          server_id=self.sftp.attr_server_id,
                                          user_name="sftpuser")
        core.CfnOutput(scope=self,
                       id="SFTPHostVpceOut",
                       value=self.sftp_vpce.vpc_endpoint_id)
        core.CfnOutput(scope=self,
                       id="SFTPUserOut",
                       value=self.sftp_user.attr_user_name)
        self.sftpCloudMap = namespace.create_service(id='SftpSvc', name='SFTP')
        self.sftpCloudMap.register_non_ip_instance(
            id='sftpEndpoint',
            custom_attributes={
                'vpce_id': self.sftp_vpce.vpc_endpoint_id,
                'user': '******',
                'bucket': self.sftpBucket.bucket_name
            })
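
The Cloud Map registrations above publish connection metadata as non-IP instances. A consumer-side sketch of reading them back (the namespace name is an assumption; the service name and attribute key mirror the register_non_ip_instance() calls):

        import boto3

        sd = boto3.client('servicediscovery')
        resp = sd.discover_instances(NamespaceName='my-namespace',  # assumption
                                     ServiceName='KafkaSvc')
        broker_arn = resp['Instances'][0]['Attributes']['broker_arn']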
Example #7
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 vpc_stack,
                 client: bool = True,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        ensure_service_linked_role("kafka.amazonaws.com")

        client_properties = assets.Asset(self,
                                         "client_properties",
                                         path=os.path.join(
                                             dirname, "client.properties"))

        self.kafka_client_security_group = ec2.SecurityGroup(
            self,
            "kafka_client_security_group",
            vpc=vpc_stack.get_vpc,
            description="kafka client security group",
            allow_all_outbound=True,
        )
        core.Tags.of(self.kafka_client_security_group).add(
            "project", constants["PROJECT_TAG"])
        core.Tags.of(self.kafka_client_security_group).add(
            "Name", "kafka_client_sg")

        self.kafka_client_security_group.add_ingress_rule(
            ec2.Peer.ipv4(f"{external_ip}/32"),
            ec2.Port.tcp(22),
            "from own public ip",
        )

        self.kafka_security_group = ec2.SecurityGroup(
            self,
            "kafka_security_group",
            vpc=vpc_stack.get_vpc,
            description="kafka security group",
            allow_all_outbound=True,
        )
        core.Tags.of(self.kafka_security_group).add("project",
                                                    constants["PROJECT_TAG"])
        core.Tags.of(self.kafka_security_group).add("Name", "kafka_sg")

        self.kafka_security_group.connections.allow_from(
            self.kafka_security_group,
            ec2.Port.all_traffic(),
            "within kafka",
        )
        self.kafka_security_group.connections.allow_from(
            self.kafka_client_security_group,
            ec2.Port.all_traffic(),
            "from kafka client sg",
        )

        self.kafka_client_security_group.connections.allow_from(
            self.kafka_security_group,
            ec2.Port.all_traffic(),
            "from kafka",
        )

        self.kafka_cluster = msk.CfnCluster(
            self,
            "kafka_cluster",
            broker_node_group_info={
                # numberOfBrokerNodes is not a BrokerNodeGroupInfo property;
                # the broker count is set below via number_of_broker_nodes
                "clientSubnets": vpc_stack.get_vpc_public_subnet_ids,
                "instanceType": constants["KAFKA_INSTANCE_TYPE"],
                "securityGroups":
                [self.kafka_security_group.security_group_id],
            },
            encryption_info={
                "encryptionInTransit": {
                    # dict keys are camelCase in the L1 form; in_cluster is a boolean
                    "inCluster": True,
                    "clientBroker": "PLAINTEXT",
                },
            },
            cluster_name=constants["PROJECT_TAG"],
            kafka_version=constants["KAFKA_VERSION"],
            number_of_broker_nodes=constants["KAFKA_BROKER_NODES"],
            enhanced_monitoring="DEFAULT",
        )
        core.Tags.of(self.kafka_cluster).add("project",
                                             constants["PROJECT_TAG"])

        # instance for kafka client
        if client:
            # userdata for kafka client
            kafka_client_userdata = user_data_init(
                log_group_name="filebeat-kafka/kafka/instance")
            # create the instance
            kafka_client_instance = ec2.Instance(
                self,
                "kafka_client",
                instance_type=ec2.InstanceType(
                    constants["KAFKA_CLIENT_INSTANCE"]),
                machine_image=ec2.AmazonLinuxImage(
                    generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2),
                vpc=vpc_stack.get_vpc,
                vpc_subnets={"subnet_type": ec2.SubnetType.PUBLIC},
                key_name=constants["KEY_PAIR"],
                security_group=self.kafka_client_security_group,
                user_data=kafka_client_userdata,
            )
            core.Tags.of(kafka_client_instance).add("project",
                                                    constants["PROJECT_TAG"])
            # needs kafka cluster to be available
            kafka_client_instance.node.add_dependency(self.kafka_cluster)
            # create policies for EC2 to connect to Kafka
            access_kafka_policy = iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "kafka:ListClusters",
                    "kafka:GetBootstrapBrokers",
                    "kafka:DescribeCluster",
                ],
                resources=["*"],
            )
            # add the role permissions
            kafka_client_instance.add_to_role_policy(
                statement=access_kafka_policy)
            # add log permissions
            instance_add_log_permissions(kafka_client_instance)
            # add access to the file asset
            client_properties.grant_read(kafka_client_instance)
            # update the userdata with commands
            kafka_client_userdata.add_commands(
                f"aws s3 cp s3://{client_properties.s3_bucket_name}/{client_properties.s3_object_key} /home/ec2-user/client.properties",
                "yum install java-1.8.0 -y",
                f'curl https://archive.apache.org/dist/kafka/{constants["KAFKA_DOWNLOAD_VERSION"].split("-")[-1]}/{constants["KAFKA_DOWNLOAD_VERSION"]}.tgz -o {constants["KAFKA_DOWNLOAD_VERSION"]}.tgz',
                f"tar -xvf {constants['KAFKA_DOWNLOAD_VERSION']}.tgz",
                f"mv {constants['KAFKA_DOWNLOAD_VERSION']} /opt",
                f"rm {constants['KAFKA_DOWNLOAD_VERSION']}.tgz",
                f"mv -f /home/ec2-user/client.properties /opt/{constants['KAFKA_DOWNLOAD_VERSION']}/bin/client.properties",
                # create the topic, if already exists capture error message
                f"kafka_arn=`aws kafka list-clusters --region {core.Aws.REGION} --output text --query 'ClusterInfoList[*].ClusterArn'`",
                f"kafka_zookeeper=`aws kafka describe-cluster --cluster-arn $kafka_arn --region {core.Aws.REGION} --output text --query 'ClusterInfo.ZookeeperConnectString'`",
                f"make_topic=`/opt/{constants['KAFKA_DOWNLOAD_VERSION']}/bin/kafka-topics.sh --create --zookeeper $kafka_zookeeper --replication-factor 3 --partitions 1 --topic topic 2>&1`",
                f"make_topic=`/opt/{constants['KAFKA_DOWNLOAD_VERSION']}/bin/kafka-topics.sh --create --zookeeper $kafka_zookeeper --replication-factor 3 --partitions 1 --topic apachelog 2>&1`",
                f"make_topic=`/opt/{constants['KAFKA_DOWNLOAD_VERSION']}/bin/kafka-topics.sh --create --zookeeper $kafka_zookeeper --replication-factor 3 --partitions 1 --topic appevent 2>&1`",
            )
            # add the signal
            kafka_client_userdata.add_signal_on_exit_command(
                resource=kafka_client_instance)

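
The topic-creation commands above go through ZooKeeper; kafka-topics.sh in Kafka 2.2+ also accepts --bootstrap-server, and the broker string is exposed by the AWS CLI. A hedged variant under the same constants, assuming the plaintext listener that matches the PLAINTEXT client broker above:

            kafka_client_userdata.add_commands(
                f"kafka_brokers=`aws kafka get-bootstrap-brokers --cluster-arn $kafka_arn --region {core.Aws.REGION} --output text --query 'BootstrapBrokerString'`",
                f"/opt/{constants['KAFKA_DOWNLOAD_VERSION']}/bin/kafka-topics.sh --create --bootstrap-server $kafka_brokers --replication-factor 3 --partitions 1 --topic topic",
            )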
Example #8
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        vpc_name = self.node.try_get_context('vpc_name')
        vpc = aws_ec2.Vpc.from_lookup(self,
                                      'ExistingVPC',
                                      is_default=True,
                                      vpc_name=vpc_name)

        #XXX: create new vpc for msk cluster
        # vpc = aws_ec2.Vpc(self, 'VpcStack',
        #   max_azs=3,
        #   gateway_endpoints={
        #     "S3": aws_ec2.GatewayVpcEndpointOptions(
        #       service=aws_ec2.GatewayVpcEndpointAwsService.S3
        #     )
        #   }
        # )

        MSK_CLUSTER_NAME = cdk.CfnParameter(
            self,
            'KafkaClusterName',
            type='String',
            description='Managed Streaming for Apache Kafka cluster name',
            default='MSK-{}'.format(''.join(
                random.sample((string.ascii_letters), k=5))),
            allowed_pattern=r'[A-Za-z0-9\-]+')

        KAFKA_VERSION = cdk.CfnParameter(
            self,
            'KafkaVersion',
            type='String',
            description='Apache Kafka version',
            default='2.6.2',
            # Supported Apache Kafka versions
            # https://docs.aws.amazon.com/msk/latest/developerguide/supported-kafka-versions.html
            allowed_values=[
                '2.8.1', '2.8.0', '2.7.1', '2.6.2', '2.6.1', '2.6.0', '2.5.1',
                '2.4.1.1', '2.3.1', '2.2.1'
            ])

        #XXX: change broker instance type
        KAFKA_BROKER_INSTANCE_TYPE = cdk.CfnParameter(
            self,
            'KafkaBrokerInstanceType',
            type='String',
            description='Apache Kafka Broker instance type',
            default='kafka.m5.large')

        #XXX: change volume size
        KAFKA_BROKER_EBS_VOLUME_SIZE = cdk.CfnParameter(
            self,
            'KafkaBrokerEBSVolumeSize',
            type='Number',
            description='Apache Kafka Broker EBS Volume size (Minimum: 1 GiB, Maximum: 16384 GiB)',
            default=100,
            min_value=1,
            max_value=16384)

        MSK_CLIENT_SG_NAME = 'use-msk-sg-{}'.format(''.join(
            random.sample((string.ascii_lowercase), k=5)))
        sg_use_msk = aws_ec2.SecurityGroup(
            self,
            'KafkaClientSecurityGroup',
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for Amazon MSK client',
            security_group_name=MSK_CLIENT_SG_NAME)
        cdk.Tags.of(sg_use_msk).add('Name', MSK_CLIENT_SG_NAME)

        MSK_CLUSTER_SG_NAME = 'msk-sg-{}'.format(''.join(
            random.sample((string.ascii_lowercase), k=5)))
        sg_msk_cluster = aws_ec2.SecurityGroup(
            self,
            'MSKSecurityGroup',
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for Amazon MSK Cluster',
            security_group_name=MSK_CLUSTER_SG_NAME)
        sg_msk_cluster.add_ingress_rule(
            peer=sg_use_msk,
            connection=aws_ec2.Port.tcp(2181),
            description='allow msk client access to ZooKeeper')
        sg_msk_cluster.add_ingress_rule(
            peer=sg_use_msk,
            connection=aws_ec2.Port.tcp(9092),
            description='allow msk client access to plaintext brokers')
        sg_msk_cluster.add_ingress_rule(
            peer=sg_use_msk,
            connection=aws_ec2.Port.tcp(9094),
            description='allow msk client access to TLS brokers')
        cdk.Tags.of(sg_msk_cluster).add('Name', MSK_CLUSTER_SG_NAME)

        msk_broker_ebs_storage_info = aws_msk.CfnCluster.EBSStorageInfoProperty(
            volume_size=KAFKA_BROKER_EBS_VOLUME_SIZE.value_as_number)

        msk_broker_storage_info = aws_msk.CfnCluster.StorageInfoProperty(
            ebs_storage_info=msk_broker_ebs_storage_info)

        msk_broker_node_group_info = aws_msk.CfnCluster.BrokerNodeGroupInfoProperty(
            client_subnets=vpc.select_subnets(
                subnet_type=aws_ec2.SubnetType.PRIVATE_WITH_NAT).subnet_ids,
            instance_type=KAFKA_BROKER_INSTANCE_TYPE.value_as_string,
            security_groups=[
                sg_use_msk.security_group_id, sg_msk_cluster.security_group_id
            ],
            storage_info=msk_broker_storage_info)

        msk_encryption_info = aws_msk.CfnCluster.EncryptionInfoProperty(
            encryption_in_transit=aws_msk.CfnCluster.EncryptionInTransitProperty(
                client_broker='TLS_PLAINTEXT', in_cluster=True))

        msk_cluster = aws_msk.CfnCluster(
            self,
            'AWSKafkaCluster',
            broker_node_group_info=msk_broker_node_group_info,
            cluster_name=MSK_CLUSTER_NAME.value_as_string,
            #XXX: Supported Apache Kafka versions
            # https://docs.aws.amazon.com/msk/latest/developerguide/supported-kafka-versions.html
            kafka_version=KAFKA_VERSION.value_as_string,
            number_of_broker_nodes=3,
            encryption_info=msk_encryption_info,
            enhanced_monitoring='PER_TOPIC_PER_BROKER')

        amzn_linux = aws_ec2.MachineImage.latest_amazon_linux(
            generation=aws_ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=aws_ec2.AmazonLinuxEdition.STANDARD,
            virtualization=aws_ec2.AmazonLinuxVirt.HVM,
            storage=aws_ec2.AmazonLinuxStorage.GENERAL_PURPOSE,
            cpu_type=aws_ec2.AmazonLinuxCpuType.X86_64)

        KAFKA_CLIENT_EC2_SG_NAME = 'kafka-client-ec2-sg-{}'.format(''.join(
            random.sample((string.ascii_lowercase), k=5)))
        sg_kafka_client_ec2_instance = aws_ec2.SecurityGroup(
            self,
            'KafkaClientEC2InstanceSG',
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for Kafka Client EC2 Instance',
            security_group_name=KAFKA_CLIENT_EC2_SG_NAME)
        cdk.Tags.of(sg_kafka_client_ec2_instance).add(
            'Name', KAFKA_CLIENT_EC2_SG_NAME)
        sg_kafka_client_ec2_instance.add_ingress_rule(
            peer=aws_ec2.Peer.ipv4("0.0.0.0/0"),
            connection=aws_ec2.Port.tcp(22))

        kafka_client_ec2_instance_role = aws_iam.Role(
            self,
            'KafkaClientEC2InstanceRole',
            role_name='{}-KafkaClientEC2InstanceRole'.format(self.stack_name),
            assumed_by=aws_iam.ServicePrincipal('ec2.amazonaws.com'),
            managed_policies=[
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    'AmazonSSMManagedInstanceCore'),
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    'AmazonMSKReadOnlyAccess')
            ])

        msk_client_ec2_instance = aws_ec2.Instance(
            self,
            'KafkaClientEC2Instance',
            instance_type=aws_ec2.InstanceType.of(
                instance_class=aws_ec2.InstanceClass.BURSTABLE2,
                instance_size=aws_ec2.InstanceSize.MICRO),
            machine_image=amzn_linux,
            vpc=vpc,
            availability_zone=vpc.select_subnets(
                subnet_type=aws_ec2.SubnetType.PRIVATE_WITH_NAT
            ).availability_zones[0],
            instance_name='KafkaClientInstance',
            role=kafka_client_ec2_instance_role,
            security_group=sg_kafka_client_ec2_instance,
            vpc_subnets=aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.PUBLIC))
        msk_client_ec2_instance.add_security_group(sg_use_msk)

        commands = '''
yum update -y 
yum install python3.7 -y
yum install java-1.8.0-openjdk-devel -y

cd /home/ec2-user
echo "export PATH=.local/bin:$PATH" >> .bash_profile

mkdir -p opt
cd opt
wget https://archive.apache.org/dist/kafka/2.2.1/kafka_2.12-2.2.1.tgz
tar -xzf kafka_2.12-2.2.1.tgz
ln -nsf kafka_2.12-2.2.1 kafka

cd /home/ec2-user
wget https://bootstrap.pypa.io/get-pip.py
su -c "python3.7 get-pip.py --user" -s /bin/sh ec2-user
su -c "/home/ec2-user/.local/bin/pip3 install boto3 --user" -s /bin/sh ec2-user

chown -R ec2-user ./opt
chgrp -R ec2-user ./opt
'''

        msk_client_ec2_instance.user_data.add_commands(commands)

        cdk.CfnOutput(self,
                      'StackName',
                      value=self.stack_name,
                      export_name='StackName')
        cdk.CfnOutput(self, 'VpcId', value=vpc.vpc_id, export_name='VpcId')

        cdk.CfnOutput(self,
                      'MSKSecurityGroupID',
                      value=sg_msk_cluster.security_group_id,
                      export_name='MSKSecurityGroupID')
        cdk.CfnOutput(self,
                      'KafkaClientSecurityGroupID',
                      value=sg_use_msk.security_group_id,
                      export_name='KafkaClientSecurityGroupID')
        cdk.CfnOutput(self,
                      'MSKClusterArn',
                      value=msk_cluster.ref,
                      export_name='MSKClusterArn')

        cdk.CfnOutput(self,
                      'KafkaClientEC2InstancePublicDNS',
                      value=msk_client_ec2_instance.instance_public_dns_name,
                      export_name='KafkaClientEC2InstancePublicDNS')
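
Because the stack exposes its knobs as CloudFormation parameters, they can be overridden at deploy time; a usage sketch with illustrative values:

    cdk deploy --parameters KafkaClusterName=my-msk \
               --parameters KafkaVersion=2.8.1 \
               --parameters KafkaBrokerEBSVolumeSize=200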