Example #1
    def __init__(self, cluster, conf=None, on=None):
        """
        @param cluster     Current cluster.
        @param conf        Configuration dict.
               port        Port to bind OauthbearerOIDCApp to (optional);
                           a random free port is chosen otherwise.
        @param on          Node name to run on.
        """
        super(OauthbearerOIDCApp, self).__init__(cluster, conf=conf, on=on)
        self.conf['port'] = trivup.TcpPortAllocator(self.cluster).next(
            self, port_base=self.conf.get('port', None))
        self.conf['valid_url'] = 'http://localhost:%d/retrieve' % \
            self.conf['port']
        self.conf['badformat_url'] = 'http://localhost:%d/retrieve/badformat' \
            % self.conf['port']
        self.conf['expired_url'] = 'http://localhost:%d/retrieve/expire' % \
            self.conf['port']
        self.conf['jwks_url'] = 'http://localhost:%d/keys' % self.conf['port']
        self.conf['sasl_oauthbearer_method'] = 'OIDC'
        self.conf['sasl_oauthbearer_client_id'] = '123'
        self.conf['sasl_oauthbearer_client_secret'] = 'abc'
        self.conf['sasl_oauthbearer_scope'] = 'test'
        self.conf['sasl_oauthbearer_extensions'] = \
            'ExtensionworkloadIdentity=develC348S,Extensioncluster=lkc123'
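A minimal usage sketch for this constructor; the Cluster import path and app module layout follow the usual trivup conventions and are assumptions here, not part of the example above:

    # Assumed trivup layout: Cluster lives in trivup.trivup,
    # apps under trivup.apps.<AppName>.
    from trivup.trivup import Cluster
    from trivup.apps.OauthbearerOIDCApp import OauthbearerOIDCApp

    cluster = Cluster('OIDCCluster', 'tmp', debug=True)
    # Omitting 'port' in conf lets TcpPortAllocator pick a free port.
    oidc = OauthbearerOIDCApp(cluster)
    print(oidc.conf['valid_url'])  # http://localhost:<port>/retrieve
    print(oidc.conf['jwks_url'])   # http://localhost:<port>/keys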
Example #2
    def __init__(self, cluster, realm, conf=None, on=None):
        """
        @param cluster     Current cluster
        @param realm       Realm name
        @param conf        Configuration dict, ignored.
        @param on          Node name to run on
        """
        super(KerberosKdcApp, self).__init__(cluster, conf=conf, on=on)

        self.conf['port'] = trivup.TcpPortAllocator(self.cluster).next(self)
        self.conf['realm'] = realm
        self.conf['address'] = '%(nodename)s:%(port)d' % self.conf
        self.conf['dbpath'] = self.mkpath('database')
        self.conf['admin_keytab'] = self.mkpath('admin_keytab')
        self.conf['stash_file'] = self.mkpath('stash_file')

        # Generate config files
        self.conf['krb5_conf'] = self.create_file_from_template(
            'krb5.conf', self.conf)
        self.env_add('KRB5_CONFIG', self.conf['krb5_conf'])
        self.conf['kdc_conf'] = self.create_file_from_template(
            'kdc.conf', self.conf)
        self.env_add('KRB5_KDC_PROFILE', self.conf['kdc_conf'])

        # Create database and stash file
        r = self.execute(
            'kdb5_util -P "" -r %(realm)s -d "%(dbpath)s" -sf "%(stash_file)s" create -s'
            % self.conf).wait()
        if r != 0:
            raise Exception('Failed to create kdb5 database')

        self.conf['start_cmd'] = '/usr/sbin/krb5kdc -n'
        self.conf['stop_cmd'] = None  # Ctrl-C
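A minimal usage sketch; module paths assume the standard trivup layout and the realm name is illustrative:

    from trivup.trivup import Cluster
    from trivup.apps.KerberosKdcApp import KerberosKdcApp

    cluster = Cluster('KrbCluster', 'tmp', debug=True)
    # The constructor creates the KDC database and config files up front.
    kdc = KerberosKdcApp(cluster, 'EXAMPLE.COM')
    # add_principal() (not shown above) is what other apps, e.g.
    # KafkaBrokerApp, use to create service principals and keytabs.
    principal, keytab = kdc.add_principal('kafka', 'localhost')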
Example #3
    def __init__(self, cluster, conf=None, on=None, bin_path=None):
        """
        @param cluster     Current cluster
        @param on          Node name to run on

        Config:
          bindir    Path to zookeeper-server-start.sh directory (optional)
                    Falls back to Kafka bindir

        Exposes 'address' (host:port) for other apps.
        """
        super(ZookeeperApp, self).__init__(cluster, conf=conf, on=on)
        self.conf['port'] = trivup.TcpPortAllocator(self.cluster).next(self)
        self.conf['datadir'] = self.create_dir('datadir')
        self.conf['address'] = '%(nodename)s:%(port)d' % self.conf
        # Generate config file
        self.conf['conf_file'] = self.create_file_from_template('zookeeper.properties', self.conf)  # noqa: E501
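A minimal usage sketch; imports assume the standard trivup module layout:

    from trivup.trivup import Cluster
    from trivup.apps.ZookeeperApp import ZookeeperApp

    cluster = Cluster('ZkCluster', 'tmp', debug=True)
    zk = ZookeeperApp(cluster)
    # 'address' is what KafkaBrokerApp picks up via cluster.find_app().
    print(zk.conf['address'])  # '<nodename>:<port>'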
Example #4
    def __init__(self, cluster, realm, conf=None, on=None):
        """
        @param cluster     Current cluster
        @param realm       Realm name
        @param conf        Configuration dict, optional.
           "port": port to bind to.
           "cross_realms": "realm1=kdc1:port,realm2=kdc2:port" - cross-realm.
                           The first cross realm is the $default_realm.
           "renew_lifetime": see krb5.conf docs  (default 10 min)
           "ticket_lifetime": see krb5.conf docs (default 60 min)
        @param on          Node name to run on
        """
        super(KerberosKdcApp, self).__init__(cluster, conf=conf, on=on)

        self.conf['realm'] = realm
        if self.conf.get('port', None) is None:
            self.conf['port'] = trivup.TcpPortAllocator(self.cluster).next(self)  # noqa: E501
        self.conf['address'] = '%(nodename)s:%(port)d' % self.conf
        self.conf['dbpath'] = self.mkpath('database')
        self.conf['admin_keytab'] = self.mkpath('admin_keytab')
        self.conf['stash_file'] = self.mkpath('stash_file')

        if self.conf.get('renew_lifetime', None) is None:
            self.conf['renew_lifetime'] = '12h'

        if self.conf.get('ticket_lifetime', None) is None:
            self.conf['ticket_lifetime'] = '30m'

        # Set up cross-realm trusts, if desired.
        cross_realms = self.conf.get('cross_realms', '').split(',')
        if len(cross_realms) > 0 and cross_realms[0] != '':
            cross_realms_conf = ""
            capaths_conf = ""
            for crinfo in cross_realms:
                crealm, ckdc = crinfo.split('=')
                if crealm == realm:
                    continue
                cross_realms_conf += " %s = {\n  kdc = %s\n  admin_server = %s\n }\n" % (crealm, ckdc, ckdc)  # noqa: E501
                capaths_conf += " %s = {\n  %s = .\n }\n" % (crealm, realm)
                capaths_conf += " %s = {\n  %s = .\n }\n" % (realm, crealm)

            self.conf['default_realm'] = cross_realms[0].split('=')[0]
            self.conf['cross_realms'] = cross_realms_conf
            self.conf['capaths'] = capaths_conf
        else:
            self.conf['default_realm'] = realm
            self.conf['cross_realms'] = ''
            self.conf['capaths'] = ''

        # Generate config files
        self.conf['krb5_conf'] = self.create_file_from_template('krb5.conf',
                                                                self.conf)
        self.env_add('KRB5_CONFIG', self.conf['krb5_conf'])
        self.conf['kdc_conf'] = self.create_file_from_template('kdc.conf',
                                                               self.conf)
        self.env_add('KRB5_KDC_PROFILE', self.conf['kdc_conf'])

        # Create database and stash file
        r = self.execute('kdb5_util -P "" -r %(realm)s -d "%(dbpath)s" -sf "%(stash_file)s" create -s' % self.conf).wait()  # noqa: E501
        if r != 0:
            raise Exception('Failed to create kdb5 database')

        self.conf['start_cmd'] = '/usr/sbin/krb5kdc -n'
        self.conf['stop_cmd'] = None  # Ctrl-C
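A sketch of a cross-realm setup based on the conf keys above; ports, realm names and import paths are illustrative assumptions:

    from trivup.trivup import Cluster
    from trivup.apps.KerberosKdcApp import KerberosKdcApp

    cluster = Cluster('CrossRealm', 'tmp', debug=True)
    # Both KDCs get the same cross_realms string; each skips its own
    # realm when generating trusts, and the first entry becomes the
    # $default_realm in the generated krb5.conf.
    cr = 'A.EXAMPLE.COM=localhost:11888,B.EXAMPLE.COM=localhost:12888'
    kdc_a = KerberosKdcApp(cluster, 'A.EXAMPLE.COM',
                           conf={'port': 11888, 'cross_realms': cr})
    kdc_b = KerberosKdcApp(cluster, 'B.EXAMPLE.COM',
                           conf={'port': 12888, 'cross_realms': cr})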
Example #5
    def __init__(self, cluster, conf=None, on=None):
        """
        @param cluster     Current cluster
        @param conf        Configuration dict, see below.
        @param on          Node name to run on

        Supported conf keys:
           * version - Kafka version to use, will build 'trunk' from
                       kafka_path, otherwise the version is taken to be a
                       formal release which will be downloaded and deployed.
           * listeners - CSV list of listener types:
                         PLAINTEXT,SSL,SASL,SASL_SSL
           * listener_host - alternative listener host instead of
                             node name (e.g., '*')
           * advertised_hostname - hostname to use for advertised.listeners
                                   (defaults to 'on' node)
           * sasl_mechanisms - CSV list of SASL mechanisms to enable:
                               GSSAPI,PLAIN,SCRAM-SHA-n,OAUTHBEARER
                               SASL listeners will be added automatically.
                               KerberosKdcApp is required for GSSAPI.
           * sasl_users - CSV list of user=pass entries for authenticating
                          SASL PLAIN/SCRAM clients
           * ssl_client_auth - ssl.client.auth broker property (default: required)
           * num_partitions - Topic auto-create partition count (3)
           * replication_factor - Topic auto-create replication factor (1)
           * port_base - Low TCP port base to start allocating from (random)
           * kafka_path - Path to Kafka build tree (for trunk usage)
           * fdlimit - RLIMIT_NOFILE (or "max") (default: max)
           * conf - arbitrary server.properties config as a list of strings.
           * realm - Kerberos realm to use when sasl_mechanisms contains GSSAPI
        """
        super(KafkaBrokerApp, self).__init__(cluster, conf=conf, on=on)

        self.zk = cluster.find_app('ZookeeperApp')
        if self.zk is None:
            raise Exception('ZookeeperApp required')

        # Kafka repo uses SVN nomenclature
        if self.conf['version'] == 'master':
            self.conf['version'] = 'trunk'

        if 'fdlimit' not in self.conf:
            self.conf['fdlimit'] = 'max'

        listener_host = self.conf.get('listener_host',
                                      self.conf.get('nodename'))
        # Kafka Configuration properties
        self.conf['log_dirs'] = self.create_dir('logs')
        if 'num_partitions' not in self.conf:
            self.conf['num_partitions'] = 3
        self.conf['zk_connect'] = self.zk.get('address', None)
        if 'replication_factor' not in self.conf:
            self.conf['replication_factor'] = 1

        # Kafka paths
        if self.conf.get('kafka_path', None) is not None:
            self.conf['destdir'] = self.conf['kafka_path']
            self.conf['bindir'] = os.path.join(self.conf['destdir'], 'bin')
            start_sh = os.path.join(self.conf['bindir'],
                                    'kafka-server-start.sh')
            kafka_configs_sh = os.path.join(self.conf['bindir'],
                                            'kafka-configs.sh')
        else:
            start_sh = 'kafka-server-start.sh'
            kafka_configs_sh = 'kafka-configs.sh'

        # Arbitrary (non-template) configuration statements
        conf_blob = self.conf.get('conf', list())
        jaas_blob = list()

        #
        # Configure listeners, SSL and SASL
        #
        listeners = self.conf.get('listeners', 'PLAINTEXT').split(',')

        # SASL support
        sasl_mechs = [
            x for x in self.conf.get('sasl_mechanisms', '').replace(
                ' ', '').split(',') if len(x) > 0
        ]
        if len(sasl_mechs) > 0:
            listeners.append('SASL_PLAINTEXT')

        # SSL support
        ssl = cluster.find_app(SslApp)
        if ssl is not None:
            # Add SSL listeners
            listeners.append('SSL')
            if len(sasl_mechs) > 0:
                listeners.append('SASL_SSL')

        listener_map = 'listener.security.protocol.map=' + \
            'PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:' + \
            'SASL_PLAINTEXT,SASL_SSL:SASL_SSL'

        can_docker = self.conf['version'] == 'trunk' or \
            int(self.conf['version'].split('.')[0]) > 0
        if can_docker:
            # Map DOCKER listener to PLAINTEXT security protocol
            listener_map += ',DOCKER:PLAINTEXT'

        conf_blob.append(listener_map)

        # Create listeners
        ports = [(x, trivup.TcpPortAllocator(self.cluster).next(
            self, self.conf.get('port_base', None)))
                 for x in sorted(set(listeners))]
        self.conf['port'] = ports[0][1]  # "Default" port

        if can_docker:
            # Add docker listener to allow services (e.g., SchemaRegistry) in
            # docker-containers to reach the on-host Kafka.
            docker_port = trivup.TcpPortAllocator(self.cluster).next(self)
            docker_host = '%s:%d' % (cluster.get_docker_host(), docker_port)

        self.conf['address'] = '%s:%d' % (listener_host, self.conf['port'])
        listeners = ['%s://%s:%d' % (x[0], "0.0.0.0", x[1]) for x in ports]
        if can_docker:
            listeners.append('%s://%s:%d' % ('DOCKER', "0.0.0.0", docker_port))
        self.conf['listeners'] = ','.join(listeners)
        if 'advertised_hostname' not in self.conf:
            self.conf['advertised_hostname'] = self.conf['nodename']
        advertised_listeners = [
            '%s://%s:%d' % (x[0], self.conf['advertised_hostname'], x[1])
            for x in ports
        ]
        if can_docker:
            # Expose service to docker containers as well.
            advertised_listeners.append('DOCKER://%s' % docker_host)
            self.conf['docker_advertised_listeners'] = 'PLAINTEXT://%s' % \
                docker_host
        self.conf['advertised.listeners'] = ','.join(advertised_listeners)
        self.conf['advertised_listeners'] = self.conf['advertised.listeners']
        self.conf['auto_create_topics'] = self.conf.get(
            'auto_create_topics', 'true')
        self.dbg('Listeners: %s' % self.conf['listeners'])
        self.dbg('Advertised Listeners: %s' %
                 self.conf['advertised.listeners'])

        if len(sasl_mechs) > 0:
            self.dbg('SASL mechanisms: %s' % sasl_mechs)
            jaas_blob.append('KafkaServer {')

            conf_blob.append('sasl.enabled.mechanisms=%s' %
                             ','.join(sasl_mechs))
            # Handle PLAIN and SCRAM-.. the same way
            for mech in sasl_mechs:
                if mech.find('SCRAM') != -1:
                    plugin = 'scram.Scram'
                elif mech == 'PLAIN':
                    plugin = 'plain.Plain'
                else:
                    continue

                sasl_users = self.conf.get('sasl_users', '')
                if len(sasl_users) == 0:
                    self.log(('WARNING: No sasl_users configured for %s, '
                              'expected CSV of user=pass,..') % plugin)
                else:
                    jaas_blob.append(
                        ('org.apache.kafka.common.security.'
                         '%sLoginModule required debug=true') % plugin)
                    for up in sasl_users.split(','):
                        u, p = up.split('=')
                        if plugin == 'plain.Plain':
                            jaas_blob.append('  user_%s="%s"' % (u, p))
                        elif plugin == 'scram.Scram':
                            jaas_blob.append('  username="******" password="******"' %
                                             (u, p))
                            # SCRAM users are set up in ZK
                            # with kafka-configs.sh
                            self.post_start_cmds.append(
                                ('%s --zookeeper %s --alter --add-config '
                                 '\'%s=[iterations=4096,password=%s]\' '
                                 '--entity-type users --entity-name \'%s\'') %
                                (
                                    kafka_configs_sh,
                                    self.conf['zk_connect'],  # noqa: E501
                                    mech,
                                    p,
                                    u))

                    jaas_blob[-1] += ';'

            if 'GSSAPI' in sasl_mechs:
                conf_blob.append('sasl.kerberos.service.name=%s' % 'kafka')
                realm = self.conf.get('realm', None)
                if realm is None:
                    kdc = self.cluster.find_app(KerberosKdcApp)
                    self.conf['realm'] = kdc.conf.get('realm', None)
                else:
                    kdc = self.cluster.find_app(KerberosKdcApp,
                                                ('realm', realm))
                    # If a realm was specified it is most likely because
                    # we're operating in a cross-realm scenario.
                    # Add a principal mapping client principals without
                    # hostname ("admin" rather than "admin/localhost")
                    # to a local user.
                    # This is not compatible with "admin/localhost" principals.
                    conf_blob.append(
                        'authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer'
                    )  # noqa: E501
                    conf_blob.append('allow.everyone.if.no.acl.found=true')
                    conf_blob.append(
                        'sasl.kerberos.principal.to.local.rules=RULE:[1:admin](.*)s/^.*/admin/'
                    )  # noqa: E501

                assert kdc is not None, \
                    "No KerberosKdcApp found (realm={})".format(realm)
                self.env_add('KRB5_CONFIG', kdc.conf['krb5_conf'])
                self.env_add(
                    'KAFKA_OPTS',
                    '-Djava.security.krb5.conf=%s' % kdc.conf['krb5_conf'])
                self.env_add('KAFKA_OPTS', '-Dsun.security.krb5.debug=true')
                self.kerberos_principal, self.kerberos_keytab = kdc.add_principal(
                    'kafka', self.conf['advertised_hostname'])  # noqa: E501
                jaas_blob.append(
                    'com.sun.security.auth.module.Krb5LoginModule required'
                )  # noqa: E501
                jaas_blob.append(
                    'useKeyTab=true storeKey=true doNotPrompt=true'
                )  # noqa: E501
                jaas_blob.append('keyTab="%s"' % self.kerberos_keytab)
                jaas_blob.append('debug=true')
                jaas_blob.append('principal="%s";' % self.kerberos_principal)

            if 'OAUTHBEARER' in sasl_mechs:
                # Use the unsecure JSON web token.
                # Client should be configured with
                # 'sasl.oauthbearer.config=scope=requiredScope principal=admin'
                # Change requiredScope to something else to trigger auth error.
                conf_blob.append('super.users=User:admin')
                conf_blob.append('allow.everyone.if.no.acl.found=true')
                conf_blob.append(
                    'authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer'
                )  # noqa: E501
                jaas_blob.append(
                    'org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required'
                )  # noqa: E501
                jaas_blob.append('  unsecuredLoginLifetimeSeconds="3600"')
                jaas_blob.append('  unsecuredLoginStringClaim_sub="admin"')
                jaas_blob.append(
                    '  unsecuredValidatorRequiredScope="requiredScope"'
                )  # noqa: E501
                jaas_blob.append(';')

            jaas_blob.append('};\n')
            self.conf['jaas_file'] = self.create_file(
                'jaas_broker.conf', data='\n'.join(jaas_blob))
            self.env_add(
                'KAFKA_OPTS', '-Djava.security.auth.login.config=%s' %
                self.conf['jaas_file'])
            self.env_add('KAFKA_OPTS', '-Djava.security.debug=all')

        # SSL config and keys (et al.)
        if ssl is not None:
            keystore, truststore, _, _ = ssl.create_keystore('broker%s' %
                                                             self.appid)
            conf_blob.append('ssl.protocol=TLS')
            conf_blob.append('ssl.enabled.protocols=TLSv1.2,TLSv1.1,TLSv1')
            conf_blob.append('ssl.keystore.type = JKS')
            conf_blob.append('ssl.keystore.location = %s' % keystore)
            conf_blob.append('ssl.keystore.password = %s ' %
                             ssl.conf.get('ssl_key_pass'))
            conf_blob.append('ssl.key.password = %s' %
                             ssl.conf.get('ssl_key_pass'))
            conf_blob.append('ssl.truststore.type = JKS')
            conf_blob.append('ssl.truststore.location = %s' % truststore)
            conf_blob.append('ssl.truststore.password = %s' %
                             ssl.conf.get('ssl_key_pass'))
            conf_blob.append('ssl.client.auth = %s' %
                             self.conf.get('ssl_client_auth', 'required'))

        # Generate config file
        self.conf['conf_file'] = self.create_file_from_template(
            'server.properties',  # noqa: E501
            self.conf,
            append_data=Template('\n'.join(conf_blob)).substitute(
                self.conf))  # noqa: E501

        # Generate LOG4J file (if app debug is enabled)
        if self.debug:
            self.conf['log4j_file'] = self.create_file_from_template(
                'log4j.properties', self.conf, subst=False)  # noqa: E501
            self.env_add(
                'KAFKA_LOG4J_OPTS',
                '-Dlog4j.configuration=file:%s' % self.conf['log4j_file'])

        self.env_add('LOG_DIR', self.mkpath('debug'))

        # Runs in foreground, stopped by Ctrl-C
        # This is the default for no-deploy use:
        # will be overwritten by deploy() if enabled.

        self.conf['start_cmd'] = '%s %s' % (start_sh, self.conf['conf_file'])
        self.conf['stop_cmd'] = None  # Ctrl-C
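A minimal usage sketch exercising the SCRAM path above; the version string, user credentials and import paths are illustrative assumptions:

    from trivup.trivup import Cluster
    from trivup.apps.ZookeeperApp import ZookeeperApp
    from trivup.apps.KafkaBrokerApp import KafkaBrokerApp

    cluster = Cluster('KafkaCluster', 'tmp', debug=True)
    ZookeeperApp(cluster)  # required: found via cluster.find_app()
    broker = KafkaBrokerApp(cluster, conf={
        'version': '2.8.0',                  # formal release, downloaded
        'sasl_mechanisms': 'SCRAM-SHA-256',  # adds a SASL_PLAINTEXT listener
        'sasl_users': 'testuser=testpass'})  # provisioned via kafka-configs.sh
    print(broker.conf['advertised.listeners'])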
Example #6
    def __init__(self, cluster, conf=None, on=None):
        """
        @param cluster     Current cluster
        @param conf        Configuration dict, see below.
        @param on          Node name to run on

        Supported conf keys:
           * version - Confluent Platform version to use.
           * port_base - Low TCP port base to start allocating from (random)
           * image - docker image to use
           * conf - schema-registry docker image config strings (NOT USED)
        """
        super(SchemaRegistryApp, self).__init__(cluster, conf=conf, on=on)

        if self.conf.get('image', '') == '':
            self.conf['image'] = self.default_image

        self.conf['container_id'] = 'trivup_sr_%s' % str(uuid.uuid4())[0:7]
        kafka = cluster.find_app(KafkaBrokerApp)
        if kafka is None:
            raise Exception('KafkaBrokerApp required')

        bootstrap_servers = kafka.conf.get('docker_advertised_listeners')

        if bootstrap_servers is None:
            raise Exception('KafkaBrokerApp required')

        # Create listener
        port = trivup.TcpPortAllocator(self.cluster).next(
            self, self.conf.get('port_base', None))

        docker_args = ''
        if cluster.platform == 'linux':
            # Let container bind to host localhost
            self.conf['extport'] = port
            self.conf['intport'] = port
            docker_args = '--network=host'

        elif cluster.platform == 'darwin':
            # On OSX localhost binds are not possible, so set up a
            # port forwarding.
            self.conf['extport'] = port
            self.conf['intport'] = 8081
            docker_args = '-p %d:%d' % (self.conf['extport'],
                                        self.conf['intport'])

        # This is the listener address inside the docker container
        self.conf['listeners'] = 'http://0.0.0.0:%d' % self.conf.get('intport')
        # This is the listener address outside the docker container,
        # using port-forwarding
        self.conf['url'] = 'http://localhost:%d' % self.conf['extport']

        # Run in foreground.
        self.conf['start_cmd'] = (
            'docker run %s --name %s '
            '-e SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS=%s '
            '-e SCHEMA_REGISTRY_HOST_NAME=localhost '
            '-e SCHEMA_REGISTRY_LISTENERS=%s '
            '-e SCHEMA_REGISTRY_DEBUG=true %s' % (
                docker_args, self.conf.get('container_id'), bootstrap_servers,
                self.conf.get('listeners'), self.conf.get('image')))

        # Stop through docker
        self.conf['stop_cmd'] = 'docker stop %s' % \
                                self.conf.get('container_id')
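A minimal usage sketch; the broker must expose docker_advertised_listeners, as the newer KafkaBrokerApp in Example #5 does, and the versions and import paths are illustrative assumptions:

    from trivup.trivup import Cluster
    from trivup.apps.ZookeeperApp import ZookeeperApp
    from trivup.apps.KafkaBrokerApp import KafkaBrokerApp
    from trivup.apps.SchemaRegistryApp import SchemaRegistryApp

    cluster = Cluster('SRCluster', 'tmp', debug=True)
    ZookeeperApp(cluster)
    # Broker version new enough to add the DOCKER listener.
    KafkaBrokerApp(cluster, conf={'version': '2.8.0'})
    sr = SchemaRegistryApp(cluster)
    print(sr.conf['url'])  # http://localhost:<extport>, reachable from host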
Example #7
    def __init__(self, cluster, conf=None, on=None):
        """
        @param cluster     Current cluster
        @param conf        Configuration dict, see below.
        @param on          Node name to run on

        Supported conf keys:
           * version - Kafka version to use, will build 'trunk' from kafka_path,
                       otherwise the version is taken to be a formal release which
                       will be downloaded and deployed.
           * listeners - CSV list of listener types: PLAINTEXT,SSL,SASL,SASL_SSL
           * listener_host - alternative listener host instead of node name (e.g., '*')
           * advertised_hostname - hostname to use for advertised.listeners (defaults to 'on' node)
           * sasl_mechanisms - CSV list of SASL mechanisms to enable: GSSAPI,PLAIN,SCRAM-SHA-n
                               SASL listeners will be added automatically.
                               KerberosKdcApp is required for GSSAPI.
           * sasl_users - CSV list of user=pass entries for authenticating SASL PLAIN/SCRAM clients
           * ssl_client_auth - ssl.client.auth broker property (default: required)
           * num_partitions - Topic auto-create partition count (3)
           * replication_factor - Topic auto-create replication factor (1)
           * port_base - Low TCP port base to start allocating from (random)
           * kafka_path - Path to Kafka build tree (for trunk usage)
           * fdlimit - RLIMIT_NOFILE (or "max") (default: max)
           * conf - arbitrary server.properties config as a list of strings.
        """
        super(KafkaBrokerApp, self).__init__(cluster, conf=conf, on=on)

        self.zk = cluster.find_app('ZookeeperApp')
        if self.zk is None:
            raise Exception('ZookeeperApp required')

        # Kafka repo uses SVN nomenclature
        if self.conf['version'] == 'master':
            self.conf['version'] = 'trunk'

        if 'fdlimit' not in self.conf:
            self.conf['fdlimit'] = 'max'

        listener_host = self.conf.get('listener_host',
                                      self.conf.get('nodename'))
        # Kafka Configuration properties
        self.conf['log_dirs'] = self.create_dir('logs')
        if 'num_partitions' not in self.conf:
            self.conf['num_partitions'] = 3
        self.conf['zk_connect'] = self.zk.get('address', None)
        if 'replication_factor' not in self.conf:
            self.conf['replication_factor'] = 1

        # Kafka paths
        if self.conf.get('kafka_path', None) is not None:
            self.conf['destdir'] = self.conf['kafka_path']
            self.conf['bindir'] = os.path.join(self.conf['destdir'], 'bin')
            start_sh = os.path.join(self.conf['bindir'],
                                    'kafka-server-start.sh')
            kafka_configs_sh = os.path.join(self.conf['bindir'],
                                            'kafka-configs.sh')
        else:
            start_sh = 'kafka-server-start.sh'
            kafka_configs_sh = 'kafka-configs.sh'

        # Arbitrary (non-template) configuration statements
        conf_blob = self.conf.get('conf', list())
        jaas_blob = list()

        #
        # Configure listeners, SSL and SASL
        #
        listeners = self.conf.get('listeners', 'PLAINTEXT').split(',')

        # SASL support
        sasl_mechs = [
            x for x in self.conf.get('sasl_mechanisms', '').replace(
                ' ', '').split(',') if len(x) > 0
        ]
        if len(sasl_mechs) > 0:
            listeners.append('SASL_PLAINTEXT')

        # SSL support
        if getattr(cluster, 'ssl', None) is not None:
            # Add SSL listeners
            listeners.append('SSL')
            if len(sasl_mechs) > 0:
                listeners.append('SASL_SSL')

        # Create listeners
        ports = [(x, trivup.TcpPortAllocator(self.cluster).next(
            self, self.conf.get('port_base', None)))
                 for x in sorted(set(listeners))]
        self.conf['port'] = ports[0][1]  # "Default" port
        self.conf['address'] = '%s:%d' % (listener_host, self.conf['port'])
        self.conf['listeners'] = ','.join(
            ['%s://%s:%d' % (x[0], listener_host, x[1]) for x in ports])
        if 'advertised_hostname' not in self.conf:
            self.conf['advertised_hostname'] = self.conf['nodename']
        self.conf['advertised.listeners'] = ','.join([
            '%s://%s:%d' % (x[0], self.conf['advertised_hostname'], x[1])
            for x in ports
        ])
        self.conf['auto_create_topics'] = self.conf.get(
            'auto_create_topics', 'true')
        self.dbg('Listeners: %s' % self.conf['listeners'])
        self.dbg('Advertised Listeners: %s' %
                 self.conf['advertised.listeners'])

        if len(sasl_mechs) > 0:
            self.dbg('SASL mechanisms: %s' % sasl_mechs)
            jaas_blob.append('KafkaServer {')

            conf_blob.append('sasl.enabled.mechanisms=%s' %
                             ','.join(sasl_mechs))
            # Handle PLAIN and SCRAM-.. the same way
            for mech in sasl_mechs:
                if mech.find('SCRAM') != -1:
                    plugin = 'scram.Scram'
                elif mech == 'PLAIN':
                    plugin = 'plain.Plain'
                else:
                    continue

                sasl_users = self.conf.get('sasl_users', '')
                if len(sasl_users) == 0:
                    self.log(
                        'WARNING: No sasl_users configured for %s, expected CSV of user=pass,..'
                        % plugin)
                else:
                    jaas_blob.append(
                        'org.apache.kafka.common.security.%sLoginModule required debug=true'
                        % plugin)
                    for up in sasl_users.split(','):
                        u, p = up.split('=')
                        if plugin == 'plain.Plain':
                            jaas_blob.append('  user_%s="%s"' % (u, p))
                        elif plugin == 'scram.Scram':
                            jaas_blob.append('  username="%s" password="%s"' %
                                             (u, p))
                            # SCRAM users are set up in ZK with kafka-configs.sh
                            self.post_start_cmds.append(
                                ('%s --zookeeper %s --alter --add-config '
                                 '\'%s=[iterations=4096,password=%s]\' '
                                 '--entity-type users --entity-name \'%s\'') %
                                (kafka_configs_sh, self.conf['zk_connect'],
                                 mech, p, u))

                    jaas_blob[-1] += ';'

            if 'GSSAPI' in sasl_mechs:
                conf_blob.append('sasl.kerberos.service.name=%s' % 'kafka')
                kdc = self.cluster.find_app(KerberosKdcApp)
                self.env_add('KRB5_CONFIG', kdc.conf['krb5_conf'])
                self.env_add(
                    'KAFKA_OPTS',
                    '-Djava.security.krb5.conf=%s' % kdc.conf['krb5_conf'])
                self.env_add('KAFKA_OPTS', '-Dsun.security.krb5.debug=true')
                self.kerberos_principal, self.kerberos_keytab = kdc.add_principal(
                    'kafka', self.conf['advertised_hostname'])
                jaas_blob.append(
                    'com.sun.security.auth.module.Krb5LoginModule required')
                jaas_blob.append(
                    'useKeyTab=true storeKey=true doNotPrompt=true')
                jaas_blob.append('keyTab="%s"' % self.kerberos_keytab)
                jaas_blob.append('debug=true')
                jaas_blob.append('principal="%s";' % self.kerberos_principal)

            jaas_blob.append('};\n')
            self.conf['jaas_file'] = self.create_file(
                'jaas_broker.conf', data='\n'.join(jaas_blob))
            self.env_add(
                'KAFKA_OPTS', '-Djava.security.auth.login.config=%s' %
                self.conf['jaas_file'])
            self.env_add('KAFKA_OPTS', '-Djava.security.debug=all')

        # SSL config and keys (et al.)
        if getattr(cluster, 'ssl', None) is not None:
            ssl = cluster.ssl
            keystore, truststore, _, _ = ssl.create_keystore('broker%s' %
                                                             self.appid)
            conf_blob.append('ssl.protocol=TLS')
            conf_blob.append('ssl.enabled.protocols=TLSv1.2,TLSv1.1,TLSv1')
            conf_blob.append('ssl.keystore.type = JKS')
            conf_blob.append('ssl.keystore.location = %s' % keystore)
            conf_blob.append('ssl.keystore.password = %s ' %
                             ssl.conf.get('ssl_key_pass'))
            conf_blob.append('ssl.key.password = %s' %
                             ssl.conf.get('ssl_key_pass'))
            conf_blob.append('ssl.truststore.type = JKS')
            conf_blob.append('ssl.truststore.location = %s' % truststore)
            conf_blob.append('ssl.truststore.password = %s' %
                             ssl.conf.get('ssl_key_pass'))
            conf_blob.append('ssl.client.auth = %s' %
                             self.conf.get('ssl_client_auth', 'required'))

        # Generate config file
        self.conf['conf_file'] = self.create_file_from_template(
            'server.properties', self.conf, append_data='\n'.join(conf_blob))

        # Generate LOG4J file (if app debug is enabled)
        if self.debug:
            self.conf['log4j_file'] = self.create_file_from_template(
                'log4j.properties', self.conf, subst=False)
            self.env_add(
                'KAFKA_LOG4J_OPTS',
                '-Dlog4j.configuration=file:%s' % self.conf['log4j_file'])

        self.env_add('LOG_DIR', self.mkpath('debug'))

        # Runs in foreground, stopped by Ctrl-C
        # This is the default for no-deploy use: will be overwritten by deploy() if enabled.

        self.conf['start_cmd'] = '%s %s' % (start_sh, self.conf['conf_file'])
        self.conf['stop_cmd'] = None  # Ctrl-C
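A minimal usage sketch for this older variant; the import paths and the kafka_path value are illustrative assumptions:

    from trivup.trivup import Cluster
    from trivup.apps.ZookeeperApp import ZookeeperApp
    from trivup.apps.KafkaBrokerApp import KafkaBrokerApp

    cluster = Cluster('TrunkCluster', 'tmp', debug=True)
    ZookeeperApp(cluster)
    # 'master' is mapped to 'trunk' and run from the local kafka_path tree.
    broker = KafkaBrokerApp(cluster, conf={'version': 'master',
                                           'kafka_path': '/path/to/kafka'})
    # <kafka_path>/bin/kafka-server-start.sh <conf_file>
    print(broker.conf['start_cmd'])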