Example #1
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        vpc_stack,
        logstash_ec2=True,
        logstash_fargate=True,
        **kwargs,
    ) -> None:
        super().__init__(scope, id, **kwargs)

        # get s3 bucket name
        s3client = boto3.client("s3")
        s3_bucket_list = s3client.list_buckets()
        s3_bucket_name = ""
        for bkt in s3_bucket_list["Buckets"]:
            try:
                bkt_tags = s3client.get_bucket_tagging(
                    Bucket=bkt["Name"])["TagSet"]
                for keypairs in bkt_tags:
                    if (keypairs["Key"] == "aws:cloudformation:stack-name"
                            and keypairs["Value"] == "elkk-athena"):
                        s3_bucket_name = bkt["Name"]
            except ClientError as err:
                if err.response["Error"]["Code"] in [
                        "NoSuchTagSet", "NoSuchBucket"
                ]:
                    pass
                else:
                    print(f"Unexpected error: {err}")

        # get elastic endpoint
        esclient = boto3.client("es")
        es_domains = esclient.list_domain_names()
        try:
            es_domain = [
                dom["DomainName"] for dom in es_domains["DomainNames"]
                if "elkk-" in dom["DomainName"]
            ][0]
            es_endpoint = esclient.describe_elasticsearch_domain(
                DomainName=es_domain)
            es_endpoint = es_endpoint["DomainStatus"]["Endpoints"]["vpc"]
        except IndexError:
            es_endpoint = ""

        # assets for logstash stack
        logstash_yml = assets.Asset(self,
                                    "logstash_yml",
                                    path=os.path.join(dirname, "logstash.yml"))
        logstash_repo = assets.Asset(self,
                                     "logstash_repo",
                                     path=os.path.join(dirname,
                                                       "logstash.repo"))

        # update conf file to .asset
        # kafka brokerstring does not need reformatting
        logstash_conf_asset = file_updated(
            os.path.join(dirname, "logstash.conf"),
            {
                "$s3_bucket": s3_bucket_name,
                "$es_endpoint": es_endpoint,
                "$kafka_brokers": kafka_get_brokers(),
                "$elkk_region": os.environ["CDK_DEFAULT_REGION"],
            },
        )
        logstash_conf = assets.Asset(
            self,
            "logstash.conf",
            path=logstash_conf_asset,
        )

        # logstash security group
        logstash_security_group = ec2.SecurityGroup(
            self,
            "logstash_security_group",
            vpc=vpc_stack.get_vpc,
            description="logstash security group",
            allow_all_outbound=True,
        )
        core.Tags.of(logstash_security_group).add("project",
                                                  constants["PROJECT_TAG"])
        core.Tags.of(logstash_security_group).add("Name", "logstash_sg")

        # Open port 22 for SSH
        logstash_security_group.add_ingress_rule(
            ec2.Peer.ipv4(f"{external_ip}/32"),
            ec2.Port.tcp(22),
            "from own public ip",
        )

        # get security group for kafka
        ec2client = boto3.client("ec2")
        security_groups = ec2client.describe_security_groups(
            Filters=[{
                "Name": "tag-value",
                "Values": [constants["PROJECT_TAG"]],
            }],
        )

        # if kafka sg does not exist ... don't add it
        try:
            kafka_sg_id = [
                sg["GroupId"] for sg in security_groups["SecurityGroups"]
                if "kafka security group" in sg["Description"]
            ][0]
            kafka_security_group = ec2.SecurityGroup.from_security_group_id(
                self, "kafka_security_group", security_group_id=kafka_sg_id)

            # let in logstash
            kafka_security_group.connections.allow_from(
                logstash_security_group,
                ec2.Port.all_traffic(),
                "from logstash",
            )
        except IndexError:
            # print("kafka_sg_id and kafka_security_group not found")
            pass

        # get security group for elastic
        try:
            elastic_sg_id = [
                sg["GroupId"] for sg in security_groups["SecurityGroups"]
                if "elastic security group" in sg["Description"]
            ][0]
            elastic_security_group = ec2.SecurityGroup.from_security_group_id(
                self,
                "elastic_security_group",
                security_group_id=elastic_sg_id)

            # let in logstash
            elastic_security_group.connections.allow_from(
                logstash_security_group,
                ec2.Port.all_traffic(),
                "from logstash",
            )
        except IndexError:
            pass

        # elastic policy
        access_elastic_policy = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                "es:ListDomainNames",
                "es:DescribeElasticsearchDomain",
                "es:ESHttpPut",
            ],
            resources=["*"],
        )

        # kafka policy
        access_kafka_policy = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=["kafka:ListClusters", "kafka:GetBootstrapBrokers"],
            resources=["*"],
        )

        # s3 policy
        access_s3_policy = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=["s3:ListBucket", "s3:PutObject"],
            resources=["*"],
        )

        # create the Logstash instance
        if logstash_ec2:
            # userdata for Logstash
            logstash_userdata = user_data_init(
                log_group_name="elkk/logstash/instance")
            # create the instance
            logstash_instance = ec2.Instance(
                self,
                "logstash_client",
                instance_type=ec2.InstanceType(constants["LOGSTASH_INSTANCE"]),
                machine_image=ec2.AmazonLinuxImage(
                    generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2),
                vpc=vpc_stack.get_vpc,
                vpc_subnets=ec2.SubnetSelection(
                    subnet_type=ec2.SubnetType.PUBLIC),
                key_name=constants["KEY_PAIR"],
                security_group=logstash_security_group,
                user_data=logstash_userdata,
            )
            core.Tags.of(logstash_instance).add("project",
                                                constants["PROJECT_TAG"])

            # add access to the file assets
            logstash_yml.grant_read(logstash_instance)
            logstash_repo.grant_read(logstash_instance)
            logstash_conf.grant_read(logstash_instance)

            # add permissions to instance
            logstash_instance.add_to_role_policy(
                statement=access_elastic_policy)
            logstash_instance.add_to_role_policy(statement=access_kafka_policy)
            logstash_instance.add_to_role_policy(statement=access_s3_policy)

            # add log permissions
            instance_add_log_permissions(logstash_instance)

            # add commands to the userdata
            logstash_userdata.add_commands(
                # get setup assets files
                f"aws s3 cp s3://{logstash_yml.s3_bucket_name}/{logstash_yml.s3_object_key} /home/ec2-user/logstash.yml",
                f"aws s3 cp s3://{logstash_repo.s3_bucket_name}/{logstash_repo.s3_object_key} /home/ec2-user/logstash.repo",
                f"aws s3 cp s3://{logstash_conf.s3_bucket_name}/{logstash_conf.s3_object_key} /home/ec2-user/logstash.conf",
                # install java
                "amazon-linux-extras install java-openjdk11 -y",
                # install git
                "yum install git -y",
                # install pip
                "yum install python-pip -y",
                # clone the logstash output plugin for amazon es
                "git clone https://github.com/awslabs/logstash-output-amazon_es.git /home/ec2-user/logstash-output-amazon_es",
                # import the elastic gpg key
                "rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch",
                # move logstash repo file
                "mv -f /home/ec2-user/logstash.repo /etc/yum.repos.d/logstash.repo",
                # install logstash
                "yum install logstash -y",
                # add user to logstash group
                "usermod -a -G logstash ec2-user",
                # move logstash.yml to final location
                "mv -f /home/ec2-user/logstash.yml /etc/logstash/logstash.yml",
                # move logstash.conf to final location
                "mv -f /home/ec2-user/logstash.conf /etc/logstash/conf.d/logstash.conf",
                # move plugin
                "mkdir /usr/share/logstash/plugins",
                "mv -f /home/ec2-user/logstash-output-amazon_es /usr/share/logstash/plugins/logstash-output-amazon_es",
                # update gemfile
                """sed -i '5igem "logstash-output-amazon_es", :path => "/usr/share/logstash/plugins/logstash-output-amazon_es"' /usr/share/logstash/Gemfile""",
                # update ownership
                "chown -R logstash:logstash /etc/logstash",
                # start logstash
                "systemctl start logstash.service",
            )
            # add the signal
            logstash_userdata.add_signal_on_exit_command(
                resource=logstash_instance)

            # add creation policy for instance
            logstash_instance.instance.cfn_options.creation_policy = core.CfnCreationPolicy(
                resource_signal=core.CfnResourceSignal(count=1,
                                                       timeout="PT10M"))

        # fargate for logstash
        if logstash_fargate:
            # cloudwatch log group for containers
            logstash_logs_containers = logs.LogGroup(
                self,
                "logstash_logs_containers",
                log_group_name="elkk/logstash/container",
                removal_policy=core.RemovalPolicy.DESTROY,
                retention=logs.RetentionDays.ONE_WEEK,
            )
            # docker image for logstash
            logstash_image_asset = ecr_assets.DockerImageAsset(
                self,
                "logstash_image_asset",
                directory=dirname  # , file="Dockerfile"
            )

            # create the fargate cluster
            logstash_cluster = ecs.Cluster(self,
                                           "logstash_cluster",
                                           vpc=vpc_stack.get_vpc)
            core.Tags.of(logstash_cluster).add("project",
                                               constants["PROJECT_TAG"])

            # the task
            logstash_task = ecs.FargateTaskDefinition(
                self,
                "logstash_task",
                cpu=512,
                memory_limit_mib=1024,
            )

            # add container to the task
            logstash_task.add_container(
                logstash_image_asset.source_hash,
                image=ecs.ContainerImage.from_docker_image_asset(
                    logstash_image_asset),
                logging=ecs.LogDrivers.aws_logs(
                    stream_prefix="elkk", log_group=logstash_logs_containers),
            )

            # add permissions to the task
            logstash_task.add_to_task_role_policy(access_s3_policy)
            logstash_task.add_to_task_role_policy(access_elastic_policy)

            # the service
            logstash_service = ecs.FargateService(
                self,
                "logstash_service",
                cluster=logstash_cluster,
                task_definition=logstash_task,
                security_group=logstash_security_group,
                deployment_controller=ecs.DeploymentController(
                    type=ecs.DeploymentControllerType.ECS),
            )

            # scale the service on cpu utilization
            logstash_scaling = logstash_service.auto_scale_task_count(
                min_capacity=3, max_capacity=10)
            logstash_scaling.scale_on_cpu_utilization(
                "logstash_scaling",
                target_utilization_percent=75,
                scale_in_cooldown=core.Duration.seconds(60),
                scale_out_cooldown=core.Duration.seconds(60),
            )
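
The stack above only defines the construct; a minimal sketch of wiring it into a CDK v1 app follows. The module paths and the VpcStack/LogstashStack class names are assumptions for illustration and may differ from the real project layout.

# app.py (sketch, assuming hypothetical module paths)
from aws_cdk import core

from vpc.vpc_stack import VpcStack                  # hypothetical
from logstash.logstash_stack import LogstashStack   # hypothetical

app = core.App()
vpc = VpcStack(app, "elkk-vpc")
LogstashStack(
    app,
    "elkk-logstash",
    vpc_stack=vpc,
    logstash_ec2=True,      # provision the EC2-based Logstash instance
    logstash_fargate=True,  # also run Logstash as a Fargate service
)
app.synth()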
Example #2
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        vpc_stack,
        # kafka_stack,
        client: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(scope, id, **kwargs)

        # ensure that the service linked role exists
        ensure_service_linked_role("es.amazonaws.com")

        # cloudwatch log group
        elastic_log_group = logs.LogGroup(
            self,
            "elastic_log_group",
            log_group_name="elkk/elastic/aes",
            removal_policy=core.RemovalPolicy.DESTROY,
            retention=logs.RetentionDays.ONE_WEEK,
        )

        # security group for elastic client
        elastic_client_security_group = ec2.SecurityGroup(
            self,
            "elastic_client_security_group",
            vpc=vpc_stack.get_vpc,
            description="elastic client security group",
            allow_all_outbound=True,
        )
        core.Tags.of(elastic_client_security_group).add(
            "project", constants["PROJECT_TAG"])
        core.Tags.of(elastic_client_security_group).add(
            "Name", "elastic_client_sg")
        # Open port 22 for SSH
        elastic_client_security_group.add_ingress_rule(
            ec2.Peer.ipv4(f"{external_ip}/32"),
            ec2.Port.tcp(22),
            "from own public ip",
        )
        # Open port for tunnel
        elastic_client_security_group.add_ingress_rule(
            ec2.Peer.ipv4(f"{external_ip}/32"),
            ec2.Port.tcp(9200),
            "for ssh tunnel",
        )

        # security group for elastic
        self.elastic_security_group = ec2.SecurityGroup(
            self,
            "elastic_security_group",
            vpc=vpc_stack.get_vpc,
            description="elastic security group",
            allow_all_outbound=True,
        )
        core.Tags.of(self.elastic_security_group).add(
            "project", constants["PROJECT_TAG"])
        core.Tags.of(self.elastic_security_group).add("Name", "elastic_sg")

        # ingress for elastic from self
        self.elastic_security_group.connections.allow_from(
            self.elastic_security_group,
            ec2.Port.all_traffic(),
            "within elastic",
        )
        # ingress for elastic from elastic client
        self.elastic_security_group.connections.allow_from(
            elastic_client_security_group,
            ec2.Port.all_traffic(),
            "from elastic client",
        )
        # ingress for elastic client from elastic
        elastic_client_security_group.connections.allow_from(
            self.elastic_security_group,
            ec2.Port.all_traffic(),
            "from elastic",
        )

        # elastic policy
        elastic_policy = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                "es:*",
            ],
            resources=["*"],
        )
        elastic_policy.add_any_principal()
        elastic_document = iam.PolicyDocument()
        elastic_document.add_statements(elastic_policy)

        # cluster config
        cluster_config = {
            "instanceCount": constants["ELASTIC_INSTANCE_COUNT"],
            "instanceType": constants["ELASTIC_INSTANCE"],
            "zoneAwarenessEnabled": True,
            "zoneAwarenessConfig": {
                "availabilityZoneCount": 3
            },
        }
        if constants["ELASTIC_DEDICATED_MASTER"] == True:
            cluster_config["dedicatedMasterEnabled"] = True
            cluster_config["dedicatedMasterType"] = constants[
                "ELASTIC_MASTER_INSTANCE"]
            cluster_config["dedicatedMasterCount"] = constants[
                "ELASTIC_MASTER_COUNT"]

        # create the elastic cluster
        self.elastic_domain = aes.CfnDomain(
            self,
            "elastic_domain",
            elasticsearch_cluster_config=cluster_config,
            elasticsearch_version=constants["ELASTIC_VERSION"],
            ebs_options={
                "ebsEnabled": True,
                "volumeSize": 10
            },
            vpc_options={
                "securityGroupIds":
                [self.elastic_security_group.security_group_id],
                "subnetIds": vpc_stack.get_vpc_private_subnet_ids,
            },
            access_policies=elastic_document,
            #log_publishing_options={"enabled": True},
            #cognito_options={"enabled": True},
        )
        core.Tags.of(self.elastic_domain).add("project",
                                              constants["PROJECT_TAG"])

        # instance for elasticsearch
        if client:
            # userdata for elastic client
            elastic_userdata = user_data_init(
                log_group_name="elkk/elastic/instance")
            # create the instance
            elastic_instance = ec2.Instance(
                self,
                "elastic_client",
                instance_type=ec2.InstanceType(
                    constants["ELASTIC_CLIENT_INSTANCE"]),
                machine_image=ec2.AmazonLinuxImage(
                    generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2),
                vpc=vpc_stack.get_vpc,
                vpc_subnets={"subnet_type": ec2.SubnetType.PUBLIC},
                key_name=constants["KEY_PAIR"],
                security_group=elastic_client_security_group,
                user_data=elastic_userdata,
            )
            core.Tags.of(elastic_instance).add("project",
                                               constants["PROJECT_TAG"])
            # needs elastic domain to be available
            elastic_instance.node.add_dependency(self.elastic_domain)
            # create policies for EC2 to connect to Elastic
            access_elastic_policy = iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "es:ListDomainNames",
                    "es:DescribeElasticsearchDomain",
                    "es:ESHttpPut",
                ],
                resources=["*"],
            )
            # add the role permissions
            elastic_instance.add_to_role_policy(
                statement=access_elastic_policy)
            # add log permissions
            instance_add_log_permissions(elastic_instance)
            # add the signal
            elastic_userdata.add_signal_on_exit_command(
                resource=elastic_instance)
            # add creation policy for instance
            elastic_instance.instance.cfn_options.creation_policy = core.CfnCreationPolicy(
                resource_signal=core.CfnResourceSignal(count=1,
                                                       timeout="PT10M"))
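
The Amazon ES example above reads all of its sizing and naming from a shared constants dict. A minimal sketch of the keys it touches is given below; the values are illustrative assumptions, not the project's real defaults.

# constants.py (sketch; values are placeholders)
constants = {
    "PROJECT_TAG": "elkk",
    "KEY_PAIR": "elkk-key-pair",
    "ELASTIC_VERSION": "7.4",
    "ELASTIC_INSTANCE": "r5.large.elasticsearch",
    # with zone awareness over 3 AZs the data node count should be a multiple of 3
    "ELASTIC_INSTANCE_COUNT": 3,
    "ELASTIC_DEDICATED_MASTER": False,
    "ELASTIC_MASTER_INSTANCE": "r5.large.elasticsearch",
    "ELASTIC_MASTER_COUNT": 3,
    "ELASTIC_CLIENT_INSTANCE": "t2.xlarge",
}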
Example #3
    def __init__(
        self, scope: core.Construct, id: str, vpc_stack, kafka_stack, **kwargs
    ) -> None:
        super().__init__(scope, id, **kwargs)

        # log generator asset
        log_generator_py = assets.Asset(
            self, "log_generator", path=os.path.join(dirname, "log_generator.py")
        )
        # log generator requirements.txt asset
        log_generator_requirements_txt = assets.Asset(
            self,
            "log_generator_requirements_txt",
            path=os.path.join(dirname, "log_generator_requirements.txt"),
        )

        # get kafka brokers
        kafka_brokers = f'''"{kafka_get_brokers().replace(",", '", "')}"'''

        # update filebeat.yml to .asset
        filebeat_yml_asset = file_updated(
            os.path.join(dirname, "filebeat.yml"), {"$kafka_brokers": kafka_brokers},
        )
        filebeat_yml = assets.Asset(self, "filebeat_yml", path=filebeat_yml_asset)
        elastic_repo = assets.Asset(
            self, "elastic_repo", path=os.path.join(dirname, "elastic.repo")
        )
        # userdata for Filebeat
        fb_userdata = user_data_init(log_group_name="elkk/filebeat/instance")
        # instance for Filebeat
        fb_instance = ec2.Instance(
            self,
            "filebeat_client",
            instance_type=ec2.InstanceType(constants["FILEBEAT_INSTANCE"]),
            machine_image=ec2.AmazonLinuxImage(
                generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2
            ),
            vpc=vpc_stack.get_vpc,
            vpc_subnets={"subnet_type": ec2.SubnetType.PUBLIC},
            key_name=constants["KEY_PAIR"],
            security_group=kafka_stack.get_kafka_client_security_group,
            user_data=fb_userdata,
        )
        core.Tags.of(fb_instance).add("project", constants["PROJECT_TAG"])

        # create policies for EC2 to connect to kafka
        access_kafka_policy = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=["kafka:ListClusters", "kafka:GetBootstrapBrokers",],
            resources=["*"],
        )
        # add the role permissions
        fb_instance.add_to_role_policy(statement=access_kafka_policy)
        # add log permissions
        instance_add_log_permissions(fb_instance)
        # add access to the file asset
        filebeat_yml.grant_read(fb_instance)
        elastic_repo.grant_read(fb_instance)
        log_generator_py.grant_read(fb_instance)
        log_generator_requirements_txt.grant_read(fb_instance)
        # add commands to the userdata
        fb_userdata.add_commands(
            # get setup assets files
            f"aws s3 cp s3://{filebeat_yml.s3_bucket_name}/{filebeat_yml.s3_object_key} /home/ec2-user/filebeat.yml",
            f"aws s3 cp s3://{elastic_repo.s3_bucket_name}/{elastic_repo.s3_object_key} /home/ec2-user/elastic.repo",
            f"aws s3 cp s3://{log_generator_py.s3_bucket_name}/{log_generator_py.s3_object_key} /home/ec2-user/log_generator.py",
            f"aws s3 cp s3://{log_generator_requirements_txt.s3_bucket_name}/{log_generator_requirements_txt.s3_object_key} /home/ec2-user/requirements.txt",
            # get python3
            "yum install python3 -y",
            # get pip
            "yum install python-pip -y",
            # make log generator executable
            "chmod +x /home/ec2-user/log_generator.py",
            # get log generator requirements
            "python3 -m pip install -r /home/ec2-user/requirements.txt",
            # import the elastic gpg key
            "rpm --import https://packages.elastic.co/GPG-KEY-elasticsearch",
            # move Filebeat repo file
            "mv -f /home/ec2-user/elastic.repo /etc/yum.repos.d/elastic.repo",
            # install Filebeat
            "yum install filebeat -y",
            # move filebeat.yml to final location
            "mv -f /home/ec2-user/filebeat.yml /etc/filebeat/filebeat.yml",
            # update log generator ownership
            "chown -R ec2-user:ec2-user /home/ec2-user",
            # start Filebeat
            "systemctl start filebeat",
        )
        # add the signal
        fb_userdata.add_signal_on_exit_command(resource=fb_instance)
        # attach the userdata
        fb_instance.add_user_data(fb_userdata.render())
        # add creation policy for instance
        fb_instance.instance.cfn_options.creation_policy = core.CfnCreationPolicy(
            resource_signal=core.CfnResourceSignal(count=1, timeout="PT10M")
        )
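
Both this example and Example #1 call a file_updated helper to substitute placeholder tokens (such as "$kafka_brokers") in a template before handing the result to assets.Asset. The real helper is not shown on this page; the sketch below is one plausible implementation, assuming plain string replacement into a temporary file whose path is returned.

# helpers.py (sketch of an assumed implementation)
import os
import tempfile


def file_updated(template_path: str, substitutions: dict) -> str:
    """Return the path of a copy of template_path with tokens replaced."""
    with open(template_path) as template_file:
        contents = template_file.read()
    for token, value in substitutions.items():
        contents = contents.replace(token, value)
    # write the substituted contents to a temp file and return its path
    handle, out_path = tempfile.mkstemp(suffix=os.path.basename(template_path))
    with os.fdopen(handle, "w") as out_file:
        out_file.write(contents)
    return out_path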
Example #4
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 vpc_stack,
                 client: bool = True,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        ensure_service_linked_role("kafka.amazonaws.com")

        client_properties = assets.Asset(self,
                                         "client_properties",
                                         path=os.path.join(
                                             dirname, "client.properties"))

        self.kafka_client_security_group = ec2.SecurityGroup(
            self,
            "kafka_client_security_group",
            vpc=vpc_stack.get_vpc,
            description="kafka client security group",
            allow_all_outbound=True,
        )
        # core.Tag.add(self.kafka_client_security_group, "project", constants["PROJECT_TAG"])
        core.Tags.of(self.kafka_client_security_group).add(
            "project", constants["PROJECT_TAG"])
        # core.Tag.add(self.kafka_client_security_group, "Name", "kafka_client_sg")
        core.Tags.of(self.kafka_client_security_group).add(
            "Name", "kafka_client_sg")

        self.kafka_client_security_group.add_ingress_rule(
            ec2.Peer.ipv4(f"{external_ip}/32"),
            ec2.Port.tcp(22),
            "from own public ip",
        )

        self.kafka_security_group = ec2.SecurityGroup(
            self,
            "kafka_security_group",
            vpc=vpc_stack.get_vpc,
            description="kafka security group",
            allow_all_outbound=True,
        )
        core.Tags.of(self.kafka_security_group).add("project",
                                                    constants["PROJECT_TAG"])
        core.Tags.of(self.kafka_security_group).add("Name", "kafka_sg")

        self.kafka_security_group.connections.allow_from(
            self.kafka_security_group,
            ec2.Port.all_traffic(),
            "within kafka",
        )
        self.kafka_security_group.connections.allow_from(
            self.kafka_client_security_group,
            ec2.Port.all_traffic(),
            "from kafka client sg",
        )

        self.kafka_client_security_group.connections.allow_from(
            self.kafka_security_group,
            ec2.Port.all_traffic(),
            "from kafka",
        )

        self.kafka_cluster = msk.CfnCluster(
            self,
            "kafka_cluster",
            broker_node_group_info={
                "clientSubnets": vpc_stack.get_vpc_public_subnet_ids,
                "instanceType": constants["KAFKA_INSTANCE_TYPE"],
                "numberOfBrokerNodes": constants["KAFKA_BROKER_NODES"],
                "securityGroups":
                [self.kafka_security_group.security_group_id],
            },
            encryption_info={
                "encryptionInTransit": {
                    "InCluster": "true",
                    "clientBroker": "PLAINTEXT",
                },
            },
            cluster_name=constants["PROJECT_TAG"],
            kafka_version=constants["KAFKA_VERSION"],
            number_of_broker_nodes=constants["KAFKA_BROKER_NODES"],
            enhanced_monitoring="DEFAULT",
        )
        core.Tags.of(self.kafka_cluster).add("project",
                                             constants["PROJECT_TAG"])

        # instance for kafka client
        if client:
            # userdata for kafka client
            kafka_client_userdata = user_data_init(
                log_group_name="filebeat-kafka/kafka/instance")
            # create the instance
            kafka_client_instance = ec2.Instance(
                self,
                "kafka_client",
                instance_type=ec2.InstanceType(
                    constants["KAFKA_CLIENT_INSTANCE"]),
                machine_image=ec2.AmazonLinuxImage(
                    generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2),
                vpc=vpc_stack.get_vpc,
                vpc_subnets={"subnet_type": ec2.SubnetType.PUBLIC},
                key_name=constants["KEY_PAIR"],
                security_group=self.kafka_client_security_group,
                user_data=kafka_client_userdata,
            )
            core.Tags.of(kafka_client_instance).add("project",
                                                    constants["PROJECT_TAG"])
            # needs kafka cluster to be available
            kafka_client_instance.node.add_dependency(self.kafka_cluster)
            # create policies for EC2 to connect to Kafka
            access_kafka_policy = iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "kafka:ListClusters",
                    "kafka:GetBootstrapBrokers",
                    "kafka:DescribeCluster",
                ],
                resources=["*"],
            )
            # add the role permissions
            kafka_client_instance.add_to_role_policy(
                statement=access_kafka_policy)
            # add log permissions
            instance_add_log_permissions(kafka_client_instance)
            # add access to the file asset
            client_properties.grant_read(kafka_client_instance)
            # update the userdata with commands
            kafka_client_userdata.add_commands(
                f"aws s3 cp s3://{client_properties.s3_bucket_name}/{client_properties.s3_object_key} /home/ec2-user/client.properties",
                "yum install java-1.8.0 -y",
                f'curl https://archive.apache.org/dist/kafka/{constants["KAFKA_DOWNLOAD_VERSION"].split("-")[-1]}/{constants["KAFKA_DOWNLOAD_VERSION"]}.tgz -o {constants["KAFKA_DOWNLOAD_VERSION"]}.tgz',
                f"tar -xvf {constants['KAFKA_DOWNLOAD_VERSION']}.tgz",
                f"mv {constants['KAFKA_DOWNLOAD_VERSION']} /opt",
                f"rm {constants['KAFKA_DOWNLOAD_VERSION']}.tgz",
                f"mv -f /home/ec2-user/client.properties /opt/{constants['KAFKA_DOWNLOAD_VERSION']}/bin/client.properties",
                # create the topic, if already exists capture error message
                f"kafka_arn=`aws kafka list-clusters --region {core.Aws.REGION} --output text --query 'ClusterInfoList[*].ClusterArn'`",
                f"kafka_zookeeper=`aws kafka describe-cluster --cluster-arn $kafka_arn --region {core.Aws.REGION} --output text --query 'ClusterInfo.ZookeeperConnectString'`",
                f"make_topic=`/opt/{constants['KAFKA_DOWNLOAD_VERSION']}/bin/kafka-topics.sh --create --zookeeper $kafka_zookeeper --replication-factor 3 --partitions 1 --topic topic 2>&1`",
                f"make_topic=`/opt/{constants['KAFKA_DOWNLOAD_VERSION']}/bin/kafka-topics.sh --create --zookeeper $kafka_zookeeper --replication-factor 3 --partitions 1 --topic apachelog 2>&1`",
                f"make_topic=`/opt/{constants['KAFKA_DOWNLOAD_VERSION']}/bin/kafka-topics.sh --create --zookeeper $kafka_zookeeper --replication-factor 3 --partitions 1 --topic appevent 2>&1`",
            )
            # add the signal
            kafka_client_userdata.add_signal_on_exit_command(
                resource=kafka_client_instance)

            kafka_client_instance.add_user_data(kafka_client_userdata.render())
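
Examples #1 and #3 resolve the MSK bootstrap broker string at synth time through a kafka_get_brokers helper that is not shown on this page. The sketch below is one plausible implementation with boto3, assuming the first cluster returned by list_clusters is the ELKK cluster and that plaintext brokers are wanted.

# helpers.py (sketch of an assumed implementation)
import boto3


def kafka_get_brokers() -> str:
    """Return the plaintext bootstrap broker string of the first MSK cluster."""
    client = boto3.client("kafka")
    clusters = client.list_clusters().get("ClusterInfoList", [])
    if not clusters:
        return ""
    brokers = client.get_bootstrap_brokers(ClusterArn=clusters[0]["ClusterArn"])
    return brokers.get("BootstrapBrokerString", "")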