Example No. 1
    def __init__(self, broker_cnt=3, kafka_version='2.3.0'):
        super(KafkaCluster, self).__init__()

        self.cluster = Cluster('KafkaCluster',
                               root_path=os.environ.get('TRIVUP_ROOT', 'tmp'),
                               debug=True)

        self.apps = dict()

        # Create a single ZK for the cluster
        ZookeeperApp(self.cluster)

        # Create broker_cnt brokers
        brokerconf = {
            'replication_factor': min(3, int(broker_cnt)),
            'num_partitions': 4,
            'version': kafka_version
        }

        self.brokers = dict()
        for n in range(0, broker_cnt):
            broker = KafkaBrokerApp(self.cluster, brokerconf)
            self.brokers[broker.appid] = broker

        # Get bootstrap server list
        security_protocol = 'PLAINTEXT'
        all_listeners = (','.join(
            self.cluster.get_all('listeners', '', KafkaBrokerApp))).split(',')
        bootstrap_servers = ','.join(
            [x for x in all_listeners if x.startswith(security_protocol)])

        # Create client base configuration
        self._client_config = {
            'bootstrap.servers': bootstrap_servers,
            'broker.address.family': 'v4'
        }

        self.cluster.deploy()
Example No. 2
def test_Ssl():
    cluster = Cluster('TestCluster', 'tmp', debug=True)

    # SSL App
    ssl = SslApp(cluster)

    a = ssl.create_keystore('mybroker')
    print('created keystore: %s' % (a, ))

    r = ssl.create_cert('myclient')
    print('created key: %s' % r)

    r = ssl.create_cert('selfsigned_myclient', with_ca=False)
    print('created key: %s' % r)

    r = ssl.create_cert('intermediate_myclient', through_intermediate=True)
    print('created key: %s' % r)

    r = ssl.create_cert('selfsigned_intermediate_myclient',
                        with_ca=False,
                        through_intermediate=True)
    print('created key: %s' % r)

    cluster.cleanup()
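
# A follow-up sketch (hedged, comment-only): assuming create_cert() returns a
# dict carrying 'pub'/'priv' PEM paths and a 'password', as the fuller
# KafkaCluster example below (Example No. 4) relies on, the result maps onto
# librdkafka SSL settings roughly like:
#
#   key = ssl.create_cert('myclient')
#   client_conf = {
#       'ssl.ca.location': ssl.ca['pem'],
#       'ssl.certificate.location': key['pub']['pem'],
#       'ssl.key.location': key['priv']['pem'],
#       'ssl.key.password': key['password'],
#   }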
Example No. 3
    def __init__(self, **kwargs):
        """ Create and start a KafkaCluster.
            See default_conf above for parameters. """
        super(KafkaCluster, self).__init__()

        conf = kwargs
        self.conf = deepcopy(self.default_conf)
        if conf is not None:
            self.conf.update(conf)

        self.version = self.conf.get('version')
        self.version_num = [int(x) for x in self.version.split('.')][:3]
        self.kraft = self.conf.get('kraft')

        # Create trivup Cluster
        self.cluster = Cluster(self.__class__.__name__,
                               os.environ.get(
                                   'TRIVUP_ROOT',
                                   'tmp-%s' % self.__class__.__name__),
                               debug=bool(self.conf.get('debug', False)),
                               cleanup=bool(self.conf.get('cleanup', True)))

        self._client_conf = dict()
        self.env = dict()

        self.sasl_mechanism = self.conf.get('sasl_mechanism')

        # Add OIDC server app
        self.oidc = None
        if bool(self.conf.get('oidc', False)):
            self.oidc = OauthbearerOIDCApp(self.cluster)
            if not self.sasl_mechanism:
                self.sasl_mechanism = 'OAUTHBEARER'
            elif self.sasl_mechanism.upper() != 'OAUTHBEARER':
                raise RuntimeError(
                    f"OIDC requires sasl.mechanism OAUTHBEARER, not '{self.sasl_mechanism}'"
                )

        # Generate SSL certs if enabled
        if bool(self.conf.get('with_ssl')):
            self.ssl = SslApp(self.cluster, self.conf)
        else:
            self.ssl = None

        # Map mechanism and SSL to security protocol
        self.security_protocol = {
            (True, True): 'SASL_SSL',
            (True, False): 'SASL_PLAINTEXT',
            (False, True): 'SSL',
            (False, False): 'PLAINTEXT'
        }[(bool(self.sasl_mechanism), bool(self.ssl is not None))]

        if not self.kraft:
            # Create single ZK for the cluster (don't start yet)
            self.zk = ZookeeperApp(self.cluster)
        else:
            self.zk = None
            # Allocate (but don't use) a dummy appid so that the brokers get
            # the same appid/nodeid for both KRaft and ZK modes.
            Allocator(self.cluster).next()

        # Broker configuration
        broker_cnt = int(self.conf.get('broker_cnt'))
        self.broker_conf = {
            'replication_factor': min(3, broker_cnt),
            'num_partitions': 4,
            'version': self.version,
            'sasl_mechanisms': self.sasl_mechanism,
            'sasl_users': self.conf.get('sasl_users'),
            'conf': self.conf.get('broker_conf', []),
            'kafka_path': self.conf.get('kafka_path', None)
        }

        # Start Kerberos KDCs if GSSAPI (Kerberos) is configured
        if self.sasl_mechanism == 'GSSAPI':
            self._setup_kerberos()
            self.broker_conf['realm'] = self.broker_realm

        self.broker_conf['listener_host'] = 'localhost'
        # Create brokers (don't start yet)
        self.brokers = dict()
        for n in range(0, broker_cnt):
            bconf = copy.deepcopy(self.broker_conf)
            if self.version_num >= [2, 4, 0]:
                # Configure rack & replica selector if broker supports
                # fetch-from-follower
                bconf.update({
                    'conf': [
                        'broker.rack=RACK${appid}',
                        'replica.selector.class=org.apache.kafka.common.replica.RackAwareReplicaSelector'
                    ]
                })  # noqa: E501

            broker = KafkaBrokerApp(self.cluster, bconf)
            self.brokers[broker.appid] = broker

        # Generate bootstrap servers list
        all_listeners = (','.join(
            self.cluster.get_all('advertised_listeners', '',
                                 KafkaBrokerApp))).split(',')
        self.bootstrap_servers = ','.join(
            [x for x in all_listeners if x.startswith(self.security_protocol)])

        assert len(self.bootstrap_servers.split(',')) >= broker_cnt, \
            "{} < {} expected bootstrap servers".format(
                len(self.bootstrap_servers.split(',')), broker_cnt)

        # Create SchemaRegistry if enabled
        if bool(self.conf.get('with_sr', False)):
            self.sr = SchemaRegistryApp(
                self.cluster, {'version': self.conf.get('cp_version')})
            self.env['SR_URL'] = self.sr.get('url')

        # Create librdkafka client configuration
        self._setup_client_conf()

        # Deploy cluster
        self.cluster.deploy()

        # Start cluster
        self.start()
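
# The (SASL?, SSL?) -> security protocol mapping above is a plain dict lookup
# keyed on two booleans, so it can be exercised on its own. A minimal,
# self-contained sketch of the same technique (standalone, not part of the
# original class):

def security_protocol_for(sasl_mechanism, ssl_enabled):
    """ Map SASL/SSL enablement to a Kafka security protocol name. """
    return {
        (True, True): 'SASL_SSL',
        (True, False): 'SASL_PLAINTEXT',
        (False, True): 'SSL',
        (False, False): 'PLAINTEXT'
    }[(bool(sasl_mechanism), bool(ssl_enabled))]


assert security_protocol_for('PLAIN', True) == 'SASL_SSL'
assert security_protocol_for('', False) == 'PLAINTEXT'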
Example No. 4
class KafkaCluster(object):
    # conf dict structure with defaults:
    # commented-out fields are not defaults but show what is available.
    default_conf = {
        'version': '2.8.0',  # Apache Kafka version
        'cp_version': '6.1.0',  # Confluent Platform version (for SR)
        'broker_cnt': 3,
        'sasl_mechanism': '',  # GSSAPI, PLAIN, SCRAM-.., ...
        'realm_cnt': 1,
        'krb_renew_lifetime': 30,
        'krb_ticket_lifetime': 120,
        # KRaft (No zookeeper). Requires AK >=2.8
        'kraft': False,
        # SASL PLAIN/SCRAM
        'sasl_users': 'testuser=testpass',
        # With SSL
        'with_ssl': False,
        # With SchemaRegistry
        'with_sr': False,
        # Debug trivup
        'debug': False,
        # Cleanup
        'cleanup': True,
        'oidc': False,
        # Additional broker server.properties configuration
        # 'broker_conf': ['connections.max.idle.ms=1234', ..]
    }

    def __init__(self, **kwargs):
        """ Create and start a KafkaCluster.
            See default_conf above for parameters. """
        super(KafkaCluster, self).__init__()

        conf = kwargs
        self.conf = deepcopy(self.default_conf)
        if conf is not None:
            self.conf.update(conf)

        self.version = self.conf.get('version')
        self.version_num = [int(x) for x in self.version.split('.')][:3]
        self.kraft = self.conf.get('kraft')

        # Create trivup Cluster
        self.cluster = Cluster(self.__class__.__name__,
                               os.environ.get(
                                   'TRIVUP_ROOT',
                                   'tmp-%s' % self.__class__.__name__),
                               debug=bool(self.conf.get('debug', False)),
                               cleanup=bool(self.conf.get('cleanup', True)))

        self._client_conf = dict()
        self.env = dict()

        self.sasl_mechanism = self.conf.get('sasl_mechanism')

        # Add OIDC server app
        self.oidc = None
        if bool(self.conf.get('oidc', False)):
            self.oidc = OauthbearerOIDCApp(self.cluster)
            if not self.sasl_mechanism:
                self.sasl_mechanism = 'OAUTHBEARER'
            elif self.sasl_mechanism.upper() != 'OAUTHBEARER':
                raise RuntimeError(
                    f"OIDC requires sasl.mechanism OAUTHBEARER, not '{self.sasl_mechanism}'"
                )

        # Generate SSL certs if enabled
        if bool(self.conf.get('with_ssl')):
            self.ssl = SslApp(self.cluster, self.conf)
        else:
            self.ssl = None

        # Map mechanism and SSL to security protocol
        self.security_protocol = {
            (True, True): 'SASL_SSL',
            (True, False): 'SASL_PLAINTEXT',
            (False, True): 'SSL',
            (False, False): 'PLAINTEXT'
        }[(bool(self.sasl_mechanism), bool(self.ssl is not None))]

        if not self.kraft:
            # Create single ZK for the cluster (don't start yet)
            self.zk = ZookeeperApp(self.cluster)
        else:
            self.zk = None
            # Allocate (but don't use) a dummy appid so that the brokers get
            # the same appid/nodeid for both KRaft and ZK modes.
            Allocator(self.cluster).next()

        # Broker configuration
        broker_cnt = int(self.conf.get('broker_cnt'))
        self.broker_conf = {
            'replication_factor': min(3, broker_cnt),
            'num_partitions': 4,
            'version': self.version,
            'sasl_mechanisms': self.sasl_mechanism,
            'sasl_users': self.conf.get('sasl_users'),
            'conf': self.conf.get('broker_conf', []),
            'kafka_path': self.conf.get('kafka_path', None)
        }

        # Start Kerberos KDCs if GSSAPI (Kerberos) is configured
        if self.sasl_mechanism == 'GSSAPI':
            self._setup_kerberos()
            self.broker_conf['realm'] = self.broker_realm

        self.broker_conf['listener_host'] = 'localhost'
        # Create brokers (don't start yet)
        self.brokers = dict()
        for n in range(0, broker_cnt):
            bconf = copy.deepcopy(self.broker_conf)
            if self.version_num >= [2, 4, 0]:
                # Configure rack & replica selector if broker supports
                # fetch-from-follower
                bconf.update({
                    'conf': [
                        'broker.rack=RACK${appid}',
                        'replica.selector.class=org.apache.kafka.common.replica.RackAwareReplicaSelector'
                    ]
                })  # noqa: E501

            broker = KafkaBrokerApp(self.cluster, bconf)
            self.brokers[broker.appid] = broker

        # Generate bootstrap servers list
        all_listeners = (','.join(
            self.cluster.get_all('advertised_listeners', '',
                                 KafkaBrokerApp))).split(',')
        self.bootstrap_servers = ','.join(
            [x for x in all_listeners if x.startswith(self.security_protocol)])

        assert len(self.bootstrap_servers.split(',')) >= broker_cnt, \
            "{} < {} expected bootstrap servers".format(
                len(self.bootstrap_servers.split(',')), broker_cnt)

        # Create SchemaRegistry if enabled
        if bool(self.conf.get('with_sr', False)):
            self.sr = SchemaRegistryApp(
                self.cluster, {'version': self.conf.get('cp_version')})
            self.env['SR_URL'] = self.sr.get('url')

        # Create librdkafka client configuration
        self._setup_client_conf()

        # Deploy cluster
        self.cluster.deploy()

        # Start cluster
        self.start()

    def __del__(self):
        """ Destructor: forcibly stop the cluster """
        self.stop(force=True)

    def _setup_env(self):
        """ Set up convenience envs """
        self.env['KAFKA_PATH'] = self.cluster.find_app(KafkaBrokerApp).get(
            'destdir')  # noqa: E501
        if not self.kraft:
            self.env['ZK_ADDRESS'] = self.zk.get('address')
        self.env['BROKERS'] = self.bootstrap_servers
        self.env['KAFKA_VERSION'] = self.version
        self.env['TRIVUP_ROOT'] = self.cluster.instance_path()

        # Add each broker pid as an env so they can be killed individually.
        for b in self.cluster.find_apps(KafkaBrokerApp, 'started'):
            self.env['BROKER_PID_%d' % b.appid] = str(b.proc.pid)

    def _setup_client_conf(self):
        """ Set up librdkafka client configuration """
        self._client_conf['bootstrap.servers'] = self.bootstrap_servers
        self._client_conf['broker.address.family'] = 'v4'
        if self.security_protocol != 'PLAINTEXT':
            self._client_conf['security.protocol'] = self.security_protocol

        broker_version = self.conf.get('version')
        brver = broker_version.split('.')
        if int(brver[0]) == 0 and int(brver[1]) < 10:
            self._client_conf['broker.version.fallback'] = broker_version
            self._client_conf['api.version.request'] = 'false'

        # Client SASL configuration
        if self.sasl_mechanism:
            self._client_conf['sasl.mechanism'] = self.sasl_mechanism

            if self.sasl_mechanism == 'PLAIN' or \
               self.sasl_mechanism.find('SCRAM') != -1:
                # Use first user as SASL user/pass
                for up in self.conf.get('sasl_users', '').split(','):
                    u, p = up.split('=')
                    self._client_conf['sasl.username'] = u
                    self._client_conf['sasl.password'] = p
                    break

            elif self.sasl_mechanism == 'OAUTHBEARER':
                if self.oidc is not None:
                    self._client_conf['sasl.oauthbearer.method'] = 'OIDC'
                    self._client_conf[
                        'sasl.oauthbearer.token.endpoint.url'] = self.oidc.get(
                            'valid_url')  # noqa: E501
                    self._client_conf['sasl.oauthbearer.client.id'] = '123'
                    self._client_conf['sasl.oauthbearer.client.secret'] = 'abc'
                    self._client_conf['sasl.oauthbearer.scope'] = 'test'
                    self._client_conf['sasl.oauthbearer.extensions'] = \
                        'ExtensionworkloadIdentity=develC348S,Extensioncluster=lkc123'
                else:
                    self._client_conf[
                        'enable.sasl.oauthbearer.unsecure.jwt'] = True  # noqa: E501
                    self._client_conf['sasl.oauthbearer.config'] = \
                        'scope=requiredScope principal=admin'

        # Client SSL configuration
        if self.ssl is not None:
            key = self.ssl.create_cert('client')
            self._client_conf['ssl.ca.location'] = self.ssl.ca['pem']
            self._client_conf['ssl.certificate.location'] = key['pub']['pem']
            self._client_conf['ssl.key.location'] = key['priv']['pem']
            self._client_conf['ssl.key.password'] = key['password']

            # Add envs pointing out locations of the generated certs
            for k, v in self.ssl.ca.items():
                self.env['SSL_ca_{}'.format(k)] = v

            # Set envs for all generated keys so tests can find them.
            for k, v in key.items():
                if type(v) is dict:
                    for k2, v2 in v.items():
                        # E.g. "SSL_priv_der=path/to/librdkafka-priv.der"
                        self.env['SSL_{}_{}'.format(k, k2)] = v2
                else:
                    self.env['SSL_{}'.format(k)] = v

    def _setup_kerberos(self):
        """ Set up Kerberos KDCs """

        # Create KDCs for each realm.
        # First realm will be the default / broker realm.
        #
        realm_cnt = int(self.conf.get('realm_cnt', 1))
        # No point in having more than two realms
        assert realm_cnt > 0 and realm_cnt < 3
        realms = ['REALM{}.TRIVUP'.format(x + 1) for x in range(0, realm_cnt)]

        # Pre-allocate ports for the KDCs so they can reference each other
        # in the krb5.conf configuration.
        kdc_ports = {
            x: TcpPortAllocator(self.cluster).next("dummy")
            for x in realms
        }

        # Set up realm=kdc:port cross-realm mappings
        cross_realms = ",".join([
            "{}={}:{}".format(x,
                              self.cluster.get_node().name, kdc_ports[x])
            for x in realms
        ])

        kdcs = dict()
        for realm in realms:
            kdc = KerberosKdcApp(self.cluster,
                                 realm,
                                 conf={
                                     'port':
                                     kdc_ports[realm],
                                     'cross_realms':
                                     cross_realms,
                                     'renew_lifetime':
                                     int(self.conf.get('krb_renew_lifetime')),
                                     'ticket_lifetime':
                                     int(self.conf.get('krb_ticket_lifetime'))
                                 })
            # Kerberos needs to be started prior to Kafka so that principals
            # and keytabs are available at the time of Kafka config generation.
            kdc.start()
            kdcs[realm] = kdc

        self.broker_realm = realms[0]
        self.client_realm = realms[-1]
        self.broker_kdc = kdcs[self.broker_realm]
        self.client_kdc = kdcs[self.client_realm]

        # Add cross-realm TGTs
        if realm_cnt > 1:
            KerberosKdcApp.add_cross_realm_tgts(kdcs)

        # Add client envs and configuration
        self.env['KRB5CCNAME'] = self.client_kdc.mkpath('krb5cc')
        self.env['KRB5_CONFIG'] = self.client_kdc.conf['krb5_conf']
        self.env['KRB5_KDC_PROFILE'] = self.client_kdc.conf['kdc_conf']
        principal, keytab = self.client_kdc.add_principal('admin')

        self._client_conf['sasl.kerberos.keytab'] = keytab
        self._client_conf['sasl.kerberos.principal'] = principal.split('@')[0]
        # Refresh ticket 60s before renew timeout.
        self._client_conf['sasl.kerberos.min.time.before.relogin'] = \
            max(1, int(self.conf.get('krb_renew_lifetime')) - 60) * 1000

    def start(self, timeout=0):
        """ Start cluster """
        self.cluster.start()

        # Set up additional convenience envs
        self._setup_env()

        if timeout > 0:
            self.wait_operational(timeout)

    def stop(self, cleanup=True, keeptypes=['log'], force=False, timeout=0):
        """ Stop cluster and clean up """
        self.cluster.stop(force=True)
        if timeout > 0:
            self.cluster.wait_stopped(timeout)
        if cleanup:
            self.cluster.cleanup(keeptypes)

    def stopped(self):
        """ Returns True when all components of the cluster are stopped """
        return len([x for x in self.cluster.apps
                    if x.status() == 'stopped']) == len(self.cluster.apps)

    def wait_operational(self, timeout=60):
        """ Wait for cluster to go operational """
        if not self.cluster.wait_operational(timeout):
            self.stop(force=True)
            raise Exception(
                "Cluster {} did not go operational, see logs in {}/{}".format(
                    self.cluster.name, self.cluster.root_path,
                    self.cluster.instance))

    def stop_broker(self, broker_id):
        """ Stop single broker """
        broker = self.brokers.get(broker_id, None)
        if broker is None:
            raise LookupError("Unknown broker id {}".format(broker_id))
        broker.stop()

    def start_broker(self, broker_id):
        """ Start single broker """
        broker = self.brokers.get(broker_id, None)
        if broker is None:
            raise LookupError("Unknown broker id {}".format(broker_id))
        broker.start()

    def interactive(self, cmd=None):
        """ Execute an interactive shell that has all the
            environment variables set. """

        if cmd is None:
            print('# Interactive mode')

        print("# - Waiting for cluster to go operational in {}/{}".format(
            self.cluster.root_path, self.cluster.instance))
        self.wait_operational()

        env = self.env.copy()

        # Avoids 'dumb' terminal mode
        env['TERM'] = os.environ.get('TERM', 'vt100')

        # Write librdkafka client configuration to file.
        cf_path = self.cluster.mkpath('rdkafka.conf', in_instance=True)
        self.write_client_conf(cf_path)
        env['RDKAFKA_TEST_CONF'] = cf_path
        print("# - Client configuration in {}".format(cf_path))
        print("# - Connect to cluster with bootstrap.servers {}".format(
            self.bootstrap_servers))

        # Prefix the standard prompt with cluster info.
        if cmd is None:
            pfx = '[TRIVUP:{}@{}] '.format(self.cluster.name, self.version)
            fullcmd = 'bash --rcfile <(cat ~/.bashrc; echo \'PS1="{}$PS1"\') -i'.format(
                pfx)  # noqa: E501
            print("# - You're now in an interactive sub-shell, type 'exit' "
                  "to stop the cluster and exit back to your shell.\n")
            retcode = subprocess.call(fullcmd,
                                      env=env,
                                      shell=True,
                                      executable='/bin/bash')

        else:
            fullcmd = cmd
            print("# - Executing: {}".format(fullcmd))
            retcode = subprocess.call(fullcmd, env=env, shell=True)

        if retcode != 0:
            print("# - Shell exited with returncode {}: {}".format(
                retcode, fullcmd))

    def client_conf(self):
        """ Get a dict copy of the client configuration """
        return deepcopy(self._client_conf)

    def write_client_conf(self, path, additional_blob=None):
        """ Write client configuration (librdkafka) to @param path """
        with open(path, "w") as f:
            for k, v in self._client_conf.items():
                f.write(str('%s=%s\n' % (k, v)))
            if additional_blob is not None:
                f.write(str('#\n# Additional configuration:'))
                f.write(str(additional_blob))
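
# A usage sketch for the class above (hedged, comment-only: assumes the trivup
# imports and helper apps referenced by KafkaCluster are importable here):
#
#   kc = KafkaCluster(broker_cnt=1, sasl_mechanism='PLAIN', debug=True)
#   kc.wait_operational(timeout=60)        # raises if brokers never come up
#   conf = kc.client_conf()                # dict copy for a librdkafka client
#   kc.write_client_conf('rdkafka.conf')   # or persist it for external tools
#   kc.stop()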
Example No. 5
def test_version(version):
    """
    @brief Create, deploy and start a Kafka cluster using Kafka \p version
    Then run librdkafka's regression tests.
    """
    
    cluster = Cluster('librdkafkaBrokerVersionTests', 'tmp')

    # One ZK (from Kafka repo)
    zk1 = ZookeeperApp(cluster, bin_path=kafka_path + '/bin/zookeeper-server-start.sh')
    zk_address = zk1.get('address')

    # Three brokers
    conf = {'replication_factor': 3, 'num_partitions': 4, 'version': version}
    broker1 = KafkaBrokerApp(cluster, conf, kafka_path=kafka_path)
    broker2 = KafkaBrokerApp(cluster, conf, kafka_path=kafka_path)
    broker3 = KafkaBrokerApp(cluster, conf, kafka_path=kafka_path)
    bootstrap_servers = ','.join(cluster.get_all('address', '', KafkaBrokerApp))

    # Generate test config file
    fd, test_conf_file = tempfile.mkstemp(prefix='test_conf', text=True)
    os.write(fd, ('bootstrap.servers=%s\n' % bootstrap_servers).encode('ascii'))
    os.close(fd)

    print('# Deploying cluster')
    cluster.deploy()

    print('# Starting cluster')
    cluster.start()

    print('# Waiting for brokers to come up')

    if not cluster.wait_operational(30):
        raise TimeoutError('Cluster did not go operational')

    print('# Connect to cluster with bootstrap.servers %s' % bootstrap_servers)

        
    print('\033[32mCluster started.. Executing librdkafka tests\033[0m')
    r = subprocess.call('TEST_LEVEL=%d RDKAFKA_TEST_CONF=%s ZK_ADDRESS=%s make' % (test_level, test_conf_file, zk_address), shell=True)
    if r == 0:
        print('\033[37;42mTests PASSED on broker version %s\033[0m' % version)
        ret = True
    else:
        print('\033[33;41mTests FAILED on broker version %s (ret %d)\033[0m' % (version, r))
        ret = False

    os.remove(test_conf_file)

    cluster.stop(force=True)

    cluster.cleanup(keeptypes=['log'])
    return ret
Example No. 6
class KafkaCluster(object):
    def __init__(self, broker_cnt=3, kafka_version='2.3.0'):
        super(KafkaCluster, self).__init__()

        self.cluster = Cluster('KafkaCluster',
                               root_path=os.environ.get('TRIVUP_ROOT', 'tmp'),
                               debug=True)

        self.apps = dict()

        # Create a single ZK for the cluster
        ZookeeperApp(self.cluster)

        # Create broker_cnt brokers
        brokerconf = {
            'replication_factor': min(3, int(broker_cnt)),
            'num_partitions': 4,
            'version': kafka_version
        }

        self.brokers = dict()
        for n in range(0, broker_cnt):
            broker = KafkaBrokerApp(self.cluster, brokerconf)
            self.brokers[broker.appid] = broker

        # Get bootstrap server list
        security_protocol = 'PLAINTEXT'
        all_listeners = (','.join(
            self.cluster.get_all('listeners', '', KafkaBrokerApp))).split(',')
        bootstrap_servers = ','.join(
            [x for x in all_listeners if x.startswith(security_protocol)])

        # Create client base configuration
        self._client_config = {
            'bootstrap.servers': bootstrap_servers,
            'broker.address.family': 'v4'
        }

        self.cluster.deploy()

    def client_config(self):
        return self._client_config.copy()

    def start(self):
        self.cluster.start()
        if not self.cluster.wait_operational(30):
            self.cluster.stop(force=True)
            raise Exception("Cluster {} did not go operational, "
                            "see logs in {}".format(
                                self.cluster.name,
                                self.cluster.instance_path()))

    def stop(self, cleanup=True):
        self.cluster.stop()
        if cleanup:
            self.cluster.cleanup(keeptypes=['log'])

    def stopped(self):
        """ Returns True when all components of the cluster are stopped """
        return len([x for x in self.cluster.apps
                    if x.status() == 'stopped']) == len(self.cluster.apps)

    def stop_broker(self, broker_id):
        broker = self.brokers.get(broker_id, None)
        if broker is None:
            raise LookupError("Unknown broker id {}".format(broker_id))
        broker.stop()

    def start_broker(self, broker_id):
        broker = self.brokers.get(broker_id, None)
        if broker is None:
            raise LookupError("Unknown broker id {}".format(broker_id))
        broker.start()
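
# A usage sketch for this simpler cluster (hedged, comment-only, relying just
# on the methods defined above):
#
#   kc = KafkaCluster(broker_cnt=3, kafka_version='2.3.0')  # deploys on init
#   kc.start()                             # raises if not operational in 30s
#   producer_conf = kc.client_config()     # e.g. for confluent_kafka.Producer
#   kc.stop_broker(list(kc.brokers)[0])    # exercise broker-failure handling
#   kc.stop()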
Example No. 7
def test_kerberos_cross_realm():
    """ Test Kerberos cross-realm trusts """
    topic = "test"

    cluster = Cluster('KafkaCluster',
                      root_path=os.environ.get('TRIVUP_ROOT', 'tmp'),
                      debug=True)

    ZookeeperApp(cluster)

    #
    # Create KDCs for each realm.
    # First realm will be the default / broker realm.
    #
    realm_cnt = 2
    realms = ["REALM{}.COM".format(x + 1) for x in range(0, realm_cnt)]

    # Pre-allocate ports for the KDCs so they can reference each other
    # in the krb5.conf configuration.
    kdc_ports = {x: TcpPortAllocator(cluster).next("dummy") for x in realms}

    # Set up realm=kdc:port cross-realm mappings
    cross_realms = ",".join(["{}={}:{}".format(x, cluster.get_node().name, kdc_ports[x]) for x in realms])

    kdcs = dict()
    for realm in realms:
        kdc = KerberosKdcApp(cluster, realm,
                             conf={'port': kdc_ports[realm],
                                   'cross_realms': cross_realms,
                                   'renew_lifetime': '30',
                                   'ticket_lifetime': '120'})
        kdc.start()
        kdcs[realm] = kdc

    broker_realm = realms[0]
    client_realm = realms[1]
    broker_kdc = kdcs[broker_realm]
    client_kdc = kdcs[client_realm]

    # Create broker_cnt brokers
    broker_cnt = 4
    brokerconf = {'replication_factor': min(3, int(broker_cnt)),
                  'num_partitions': broker_cnt * 2,
                  'version': '2.2.0',
                  'sasl_mechanisms': 'GSSAPI',
                  'realm': broker_realm,
                  'conf': ['connections.max.idle.ms=60000']}

    brokers = dict()
    for n in range(0, broker_cnt):
        broker = KafkaBrokerApp(cluster, brokerconf)
        brokers[broker.appid] = broker

    # Get bootstrap server list
    security_protocol = 'SASL_PLAINTEXT'
    all_listeners = (','.join(cluster.get_all(
        'listeners', '', KafkaBrokerApp))).split(',')
    bootstrap_servers = ','.join([x for x in all_listeners
                                  if x.startswith(security_protocol)])

    assert len(bootstrap_servers) > 0, "no bootstrap servers"

    print("## Deploying cluster")
    cluster.deploy()
    print("## Starting cluster")
    cluster.start(timeout=30)

    # Add cross-realm TGTs
    for realm in realms:
        for crealm in [x for x in realms if x != realm]:
            kdcs[realm].execute('kadmin.local -d "{}" -q "addprinc -requires_preauth -pw password krbtgt/{}@{}"'.format(kdcs[realm].conf.get('dbpath'), crealm, realm)).wait()
            kdcs[realm].execute('kadmin.local -d "{}" -q "addprinc -requires_preauth -pw password krbtgt/{}@{}"'.format(kdcs[realm].conf.get('dbpath'), realm, crealm)).wait()

    # Create client base configuration
    client_config = {
        'bootstrap.servers': bootstrap_servers,
        'enable.sparse.connections': False,
        'broker.address.family': 'v4',
        'sasl.mechanisms': 'GSSAPI',
        'security.protocol': security_protocol,
        'debug': 'broker,security'
    }

    os.environ['KRB5CCNAME'] = client_kdc.mkpath('krb5cc')
    os.environ['KRB5_CONFIG'] = client_kdc.conf['krb5_conf']
    os.environ['KRB5_KDC_PROFILE'] = client_kdc.conf['kdc_conf']
    principal,keytab = client_kdc.add_principal("admin")

    client_config['sasl.kerberos.keytab'] = keytab
    client_config['sasl.kerberos.principal'] = principal.split('@')[0]
    client_config['sasl.kerberos.min.time.before.relogin'] = 120*1000*3

    print(client_config)

    print("bootstraps: {}".format(client_config['bootstrap.servers']))
    p = Producer(client_config)

    time.sleep(10)
    for n in range(1, 100):
        p.produce(topic, "msg #{}".format(n))

        p.poll(1.0)

    p.flush(1.0)

    print("####### {} messages remaining\n\n\n".format(len(p)))

    start = time.time()
    end = start + (90*60)
    until = start + (12*60)
    while time.time() < end:
        now = time.time()
        if until < now:
            print("### Producing 2 messages")
            for n in range(1, 2):
                p.produce(topic, "msg #{}".format(n))
            until = now + (12*60)

        p.poll(1.0)

    del p

    cluster.stop()
Example No. 8
def test_version(version, cmd=None, deploy=True, conf={}, debug=False):
    """
    @brief Create, deploy and start a Kafka cluster using Kafka \p version
    Then run librdkafka's regression tests.
    """

    print('## Test version %s' % version)
    
    cluster = Cluster('librdkafkaInteractiveBrokerVersionTests', 'tmp', debug=debug)

    # One ZK (from Kafka repo)
    zk1 = ZookeeperApp(cluster, bin_path=kafka_path + '/bin/zookeeper-server-start.sh')
    zk_address = zk1.get('address')

    # Start Kerberos KDC if GSSAPI is configured
    if 'GSSAPI' in conf.get('sasl_mechanisms', []):
        KerberosKdcApp(cluster, 'MYREALM').start()

    # Three brokers
    defconf = {'replication_factor': 3, 'num_partitions': 4, 'version': version}
    defconf.update(conf)

    print('conf: ', defconf)

    broker1 = KafkaBrokerApp(cluster, defconf, kafka_path=kafka_path)
    broker2 = KafkaBrokerApp(cluster, defconf, kafka_path=kafka_path)
    broker3 = KafkaBrokerApp(cluster, defconf, kafka_path=kafka_path)

    # Generate test config file
    security_protocol='PLAINTEXT'
    fd, test_conf_file = tempfile.mkstemp(prefix='test_conf', text=True)
    if version != 'trunk':
        os.write(fd, ('broker.version.fallback=%s\n' % version).encode('ascii'))
    else:
        os.write(fd, 'api.version.request=true\n'.encode('ascii'))
    # SASL (only one mechanism supported)
    mech = defconf.get('sasl_mechanisms', '').split(',')[0]
    if mech != '':
        os.write(fd, ('sasl.mechanisms=%s\n' % mech).encode('ascii'))
        if mech == 'PLAIN':
            print('# Writing SASL PLAIN client config to %s' % test_conf_file)
            security_protocol='SASL_PLAINTEXT'
            # Use first user as SASL user/pass
            for up in defconf.get('sasl_users', '').split(','):
                u, p = up.split('=')
                os.write(fd, ('sasl.username=%s\n' % u).encode('ascii'))
                os.write(fd, ('sasl.password=%s\n' % p).encode('ascii'))
                break
        else:
            print('# FIXME: SASL %s client config not written to %s' % (mech, test_conf_file))

    # Define bootstrap brokers based on selected security protocol
    print('# Using client security.protocol=%s' % security_protocol)
    all_listeners = (','.join(cluster.get_all('listeners', '', KafkaBrokerApp))).split(',')
    bootstrap_servers = ','.join([x for x in all_listeners if x.startswith(security_protocol)])
    os.write(fd, ('bootstrap.servers=%s\n' % bootstrap_servers).encode('ascii'))
    os.write(fd, ('security.protocol=%s\n' % security_protocol).encode('ascii'))
    os.close(fd)

    if deploy:
        print('# Deploying cluster')
        cluster.deploy()
    else:
        print('# Not deploying')

    print('# Starting cluster')
    cluster.start()

    print('# Waiting for brokers to come up')

    if not cluster.wait_operational(30):
        cluster.stop(force=True)
        raise Exception('Cluster did not go operational, see logs in %s' % \
                        (cluster.root_path))

    print('# Connect to cluster with bootstrap.servers %s' % bootstrap_servers)

    cmd_env = 'KAFKA_PATH=%s RDKAFKA_TEST_CONF=%s ZK_ADDRESS=%s BROKERS=%s TEST_KAFKA_VERSION=%s' % \
              (broker1.conf.get('destdir'), test_conf_file, zk_address, bootstrap_servers, version)
    if not cmd:
        cmd = 'bash --rcfile <(cat ~/.bashrc; echo \'PS1="[TRIVUP:%s@%s] \\u@\\h:\w$ "\')' % (cluster.name, version)
    subprocess.call('%s %s' % (cmd_env, cmd), shell=True, executable='/bin/bash')

    os.remove(test_conf_file)

    cluster.stop(force=True)

    cluster.cleanup(keeptypes=['log'])
    return True
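
# An invocation sketch (hedged: assumes the module-level kafka_path global
# this script relies on has been set up by the surrounding code):
#
#   test_version('2.3.0', conf={'sasl_mechanisms': 'PLAIN',
#                               'sasl_users': 'testuser=testpass'})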
Example No. 9
    def __init__(self, **kwargs):
        """ Create and start a KafkaCluster.
            See default_conf above for parameters. """
        super(KafkaCluster, self).__init__()

        conf = kwargs
        self.conf = deepcopy(self.default_conf)
        if conf is not None:
            self.conf.update(conf)

        self.version = self.conf.get('version')

        # Create trivup Cluster
        self.cluster = Cluster(self.__class__.__name__,
                               os.environ.get(
                                   'TRIVUP_ROOT',
                                   'tmp-%s' % self.__class__.__name__),
                               debug=bool(self.conf.get('debug', False)))

        self._client_conf = dict()
        self.env = dict()

        self.sasl_mechanism = self.conf.get('sasl_mechanism')

        # Generate SSL certs if enabled
        if bool(self.conf.get('with_ssl')):
            self.ssl = SslApp(self.cluster, self.conf)
        else:
            self.ssl = None

        # Map mechanism and SSL to security protocol
        self.security_protocol = {
            (True, True): 'SASL_SSL',
            (True, False): 'SASL_PLAINTEXT',
            (False, True): 'SSL',
            (False, False): 'PLAINTEXT'
        }[(bool(self.sasl_mechanism), bool(self.ssl is not None))]

        # Create single ZK for the cluster (don't start yet)
        self.zk = ZookeeperApp(self.cluster)

        # Broker configuration
        broker_cnt = int(self.conf.get('broker_cnt'))
        self.broker_conf = {
            'replication_factor': min(3, broker_cnt),
            'num_partitions': 4,
            'version': self.version,
            'sasl_mechanisms': self.sasl_mechanism,
            'sasl_users': self.conf.get('sasl_users'),
            'conf': self.conf.get('broker_conf', [])
        }

        # Start Kerberos KDCs if GSSAPI (Kerberos) is configured
        if self.sasl_mechanism == 'GSSAPI':
            self._setup_kerberos()
            self.broker_conf['realm'] = self.broker_realm

        # Create brokers (don't start yet)
        self.brokers = dict()
        for n in range(0, broker_cnt):
            broker = KafkaBrokerApp(self.cluster, self.broker_conf)
            self.brokers[broker.appid] = broker

        # Generate bootstrap servers list
        all_listeners = (','.join(
            self.cluster.get_all('advertised_listeners', '',
                                 KafkaBrokerApp))).split(',')
        self.bootstrap_servers = ','.join(
            [x for x in all_listeners if x.startswith(self.security_protocol)])

        assert len(self.bootstrap_servers.split(',')) >= broker_cnt, \
            "{} < {} expected bootstrap servers".format(
                len(self.bootstrap_servers.split(',')), broker_cnt)

        # Create SchemaRegistry if enabled
        if bool(self.conf.get('with_sr', False)):
            self.sr = SchemaRegistryApp(
                self.cluster, {'version': self.conf.get('cp_version')})
            self.env['SR_URL'] = self.sr.get('url')

        # Create librdkafka client configuration
        self._setup_client_conf()

        # Deploy cluster
        self.cluster.deploy()

        # Start cluster
        self.start()
Example No. 10
def test_version(version, cmd=None, deploy=True, conf={}, debug=False, exec_cnt=1,
                  root_path='tmp', broker_cnt=3):
    """
    @brief Create, deploy and start a Kafka cluster using Kafka \p version
    Then run librdkafka's regression tests.
    """

    print('## Test version %s' % version)
    
    cluster = Cluster('LibrdkafkaTestCluster', root_path, debug=debug)

    # Enable SSL if desired
    if 'SSL' in conf.get('security.protocol', ''):
        cluster.ssl = SslApp(cluster, conf)

    # One ZK (from Kafka repo)
    zk1 = ZookeeperApp(cluster)
    zk_address = zk1.get('address')

    # Start Kerberos KDC if GSSAPI is configured
    if 'GSSAPI' in conf.get('sasl_mechanisms', []):
        KerberosKdcApp(cluster, 'MYREALM').start()

    defconf = {'replication_factor': min(int(conf.get('replication_factor', broker_cnt)), 3), 'num_partitions': 4, 'version': version}
    defconf.update(conf)

    print('conf: ', defconf)

    brokers = []
    for n in range(0, broker_cnt):
        brokers.append(KafkaBrokerApp(cluster, defconf))

    # Generate test config file
    security_protocol='PLAINTEXT'
    fd, test_conf_file = tempfile.mkstemp(prefix='test_conf', text=True)
    os.write(fd, ('test.sql.command=sqlite3 rdktests\n').encode('ascii'))
    os.write(fd, 'broker.address.family=v4\n'.encode('ascii'))
    if version != 'trunk':
        os.write(fd, ('broker.version.fallback=%s\n' % version).encode('ascii'))
    else:
        os.write(fd, 'api.version.request=true\n'.encode('ascii'))
    # SASL (only one mechanism supported)
    mech = defconf.get('sasl_mechanisms', '').split(',')[0]
    if mech != '':
        os.write(fd, ('sasl.mechanisms=%s\n' % mech).encode('ascii'))
        if mech == 'PLAIN' or mech.find('SCRAM') != -1:
            print('# Writing SASL %s client config to %s' % (mech, test_conf_file))
            security_protocol='SASL_PLAINTEXT'
            # Use first user as SASL user/pass
            for up in defconf.get('sasl_users', '').split(','):
                u,p = up.split('=')
                os.write(fd, ('sasl.username=%s\n' % u).encode('ascii'))
                os.write(fd, ('sasl.password=%s\n' % p).encode('ascii'))
                break
        elif mech == 'OAUTHBEARER':
            security_protocol='SASL_PLAINTEXT'
            os.write(fd, ('sasl.oauthbearer.config=%s\n' % \
                          'scope=requiredScope principal=admin').encode('ascii'))
        else:
            print('# FIXME: SASL %s client config not written to %s' % (mech, test_conf_file))

    # SSL support
    ssl = getattr(cluster, 'ssl', None)
    if ssl is not None:
        if 'SASL' in security_protocol:
            security_protocol = 'SASL_SSL'
        else:
            security_protocol = 'SSL'

        key, req, pem = ssl.create_key('librdkafka')

        os.write(fd, ('ssl.ca.location=%s\n' % ssl.ca_cert).encode('ascii'))
        os.write(fd, ('ssl.certificate.location=%s\n' % pem).encode('ascii'))
        os.write(fd, ('ssl.key.location=%s\n' % key).encode('ascii'))
        os.write(fd, ('ssl.key.password=%s\n' % ssl.conf.get('ssl_key_pass')).encode('ascii'))


    # Define bootstrap brokers based on selected security protocol
    print('# Using client security.protocol=%s' % security_protocol)
    all_listeners = (','.join(cluster.get_all('listeners', '', KafkaBrokerApp))).split(',')
    bootstrap_servers = ','.join([x for x in all_listeners if x.startswith(security_protocol)])
    os.write(fd, ('bootstrap.servers=%s\n' % bootstrap_servers).encode('ascii'))
    os.write(fd, ('security.protocol=%s\n' % security_protocol).encode('ascii'))
    os.close(fd)

    if deploy:
        print('# Deploying cluster')
        cluster.deploy()
    else:
        print('# Not deploying')

    print('# Starting cluster, instance path %s' % cluster.instance_path())
    cluster.start()

    print('# Waiting for brokers to come up')

    if not cluster.wait_operational(30):
        cluster.stop(force=True)
        raise Exception('Cluster %s did not go operational, see logs in %s/%s' % \
                        (cluster.name, cluster.root_path, cluster.instance))

    print('# Connect to cluster with bootstrap.servers %s' % bootstrap_servers)

    cmd_env = os.environ.copy()
    cmd_env['KAFKA_PATH'] = brokers[0].conf.get('destdir')
    cmd_env['RDKAFKA_TEST_CONF'] = test_conf_file
    cmd_env['ZK_ADDRESS'] = zk_address
    cmd_env['BROKERS'] = bootstrap_servers
    cmd_env['TEST_KAFKA_VERSION'] = version
    cmd_env['TRIVUP_ROOT'] = cluster.instance_path()
    # Add each broker pid as an env so they can be killed individually.
    for b in [x for x in cluster.apps if isinstance(x, KafkaBrokerApp)]:
        cmd_env['BROKER_PID_%d' % b.appid] = str(b.proc.pid)

    if not cmd:
        cmd = 'bash --rcfile <(cat ~/.bashrc; echo \'PS1="[TRIVUP:%s@%s] \\u@\\h:\w$ "\')' % (cluster.name, version)

    ret = True

    for i in range(0, exec_cnt):
        retcode = subprocess.call(cmd, env=cmd_env, shell=True, executable='/bin/bash')
        if retcode != 0:
            print('# Command failed with returncode %d: %s' % (retcode, cmd))
            ret = False

    try:
        os.remove(test_conf_file)
    except OSError:
        pass

    cluster.stop(force=True)

    cluster.cleanup(keeptypes=['log'])
    return ret
Example No. 11
def test_version(version):
    """
    @brief Create, deploy and start a Kafka cluster using Kafka \p version
    Then run librdkafka's regression tests.
    """

    cluster = Cluster('librdkafkaBrokerVersionTests', 'tmp')

    # One ZK (from Kafka repo)
    zk1 = ZookeeperApp(cluster,
                       bin_path=kafka_path + '/bin/zookeeper-server-start.sh')
    zk_address = zk1.get('address')

    # Three brokers
    conf = {'replication_factor': 3, 'num_partitions': 4, 'version': version}
    broker1 = KafkaBrokerApp(cluster, conf, kafka_path=kafka_path)
    broker2 = KafkaBrokerApp(cluster, conf, kafka_path=kafka_path)
    broker3 = KafkaBrokerApp(cluster, conf, kafka_path=kafka_path)
    bootstrap_servers = ','.join(cluster.get_all('address', '',
                                                 KafkaBrokerApp))

    # Generate test config file
    fd, test_conf_file = tempfile.mkstemp(prefix='test_conf', text=True)
    os.write(fd, ('bootstrap.servers=%s\n' % bootstrap_servers).encode('ascii'))
    os.close(fd)

    print('# Deploying cluster')
    cluster.deploy()

    print('# Starting cluster')
    cluster.start()

    print('# Waiting for brokers to come up')

    if not cluster.wait_operational(30):
        raise TimeoutError('Cluster did not go operational')

    print('# Connect to cluster with bootstrap.servers %s' % bootstrap_servers)

    print('\033[32mCluster started.. Executing librdkafka tests\033[0m')
    r = subprocess.call(
        'TEST_LEVEL=%d RDKAFKA_TEST_CONF=%s ZK_ADDRESS=%s make' %
        (test_level, test_conf_file, zk_address),
        shell=True)
    if r == 0:
        print('\033[37;42mTests PASSED on broker version %s\033[0m' % version)
        ret = True
    else:
        print('\033[33;41mTests FAILED on broker version %s (ret %d)\033[0m' %
              (version, r))
        ret = False

    os.remove(test_conf_file)

    cluster.stop(force=True)

    cluster.cleanup(keeptypes=['log'])
    return ret
Example No. 12
def test_version(version, cmd=None, deploy=True, conf={}, debug=False, exec_cnt=1,
                  root_path='tmp', broker_cnt=3):
    """
    @brief Create, deploy and start a Kafka cluster using Kafka \p version
    Then run librdkafka's regression tests.
    """

    print('## Test version %s' % version)
    
    cluster = Cluster('LibrdkafkaTestCluster', root_path, debug=debug)

    # Enable SSL if desired
    if 'SSL' in conf.get('security.protocol', ''):
        cluster.ssl = SslApp(cluster, conf)

    # One ZK (from Kafka repo)
    zk1 = ZookeeperApp(cluster)
    zk_address = zk1.get('address')

    # Start Kerberos KDC if GSSAPI is configured
    if 'GSSAPI' in conf.get('sasl_mechanisms', []):
        KerberosKdcApp(cluster, 'MYREALM').start()

    defconf = {'replication_factor': min(broker_cnt, 3), 'num_partitions': 4, 'version': version}
    defconf.update(conf)

    print('conf: ', defconf)

    brokers = []
    for n in range(0, broker_cnt):
        brokers.append(KafkaBrokerApp(cluster, defconf))

    # Generate test config file
    security_protocol='PLAINTEXT'
    fd, test_conf_file = tempfile.mkstemp(prefix='test_conf', text=True)
    if version != 'trunk':
        os.write(fd, ('broker.version.fallback=%s\n' % version).encode('ascii'))
    else:
        os.write(fd, 'api.version.request=true\n'.encode('ascii'))
    # SASL (only one mechanism supported)
    mech = defconf.get('sasl_mechanisms', '').split(',')[0]
    if mech != '':
        os.write(fd, ('sasl.mechanisms=%s\n' % mech).encode('ascii'))
        if mech == 'PLAIN':
            print('# Writing SASL PLAIN client config to %s' % test_conf_file)
            security_protocol='SASL_PLAINTEXT'
            # Use first user as SASL user/pass
            for up in defconf.get('sasl_users', '').split(','):
                u, p = up.split('=')
                os.write(fd, ('sasl.username=%s\n' % u).encode('ascii'))
                os.write(fd, ('sasl.password=%s\n' % p).encode('ascii'))
                break
        else:
            print('# FIXME: SASL %s client config not written to %s' % (mech, test_conf_file))

    # SSL support
    ssl = getattr(cluster, 'ssl', None)
    if ssl is not None:
        if 'SASL' in security_protocol:
            security_protocol = 'SASL_SSL'
        else:
            security_protocol = 'SSL'

        key, req, pem = ssl.create_key('librdkafka')

        os.write(fd, ('ssl.ca.location=%s\n' % ssl.ca_cert).encode('ascii'))
        os.write(fd, ('ssl.certificate.location=%s\n' % pem).encode('ascii'))
        os.write(fd, ('ssl.key.location=%s\n' % key).encode('ascii'))
        os.write(fd, ('ssl.key.password=%s\n' % ssl.conf.get('ssl_key_pass')).encode('ascii'))


    # Define bootstrap brokers based on selected security protocol
    print('# Using client security.protocol=%s' % security_protocol)
    all_listeners = (','.join(cluster.get_all('listeners', '', KafkaBrokerApp))).split(',')
    bootstrap_servers = ','.join([x for x in all_listeners if x.startswith(security_protocol)])
    os.write(fd, ('bootstrap.servers=%s\n' % bootstrap_servers).encode('ascii'))
    os.write(fd, ('security.protocol=%s\n' % security_protocol).encode('ascii'))
    os.close(fd)

    if deploy:
        print('# Deploying cluster')
        cluster.deploy()
    else:
        print('# Not deploying')

    print('# Starting cluster, instance path %s' % cluster.instance_path())
    cluster.start()

    print('# Waiting for brokers to come up')

    if not cluster.wait_operational(30):
        cluster.stop(force=True)
        raise Exception('Cluster %s did not go operational, see logs in %s/%s' % \
                        (cluster.name, cluster.root_path, cluster.instance))

    print('# Connect to cluster with bootstrap.servers %s' % bootstrap_servers)

    cmd_env = 'export KAFKA_PATH="%s" RDKAFKA_TEST_CONF="%s" ZK_ADDRESS="%s" BROKERS="%s" TEST_KAFKA_VERSION="%s" TRIVUP_ROOT="%s"; ' % \
              (brokers[0].conf.get('destdir'), test_conf_file, zk_address, bootstrap_servers, version, cluster.instance_path())
    if not cmd:
        cmd = 'bash --rcfile <(cat ~/.bashrc; echo \'PS1="[TRIVUP:%s@%s] \\u@\\h:\w$ "\')' % (cluster.name, version)
    for i in range(0, exec_cnt):
        subprocess.call('%s %s' % (cmd_env, cmd), shell=True, executable='/bin/bash')

    os.remove(test_conf_file)

    cluster.stop(force=True)

    cluster.cleanup(keeptypes=['log'])
    return True
Example No. 13
#!/usr/bin/env python

from trivup.trivup import Cluster
from trivup.apps.ZookeeperApp import ZookeeperApp
from trivup.apps.KafkaBrokerApp import KafkaBrokerApp

import subprocess

if __name__ == '__main__':
    cluster = Cluster('TestCluster', 'tmp')

    # One ZK
    zk1 = ZookeeperApp(
        cluster,
        bin_path='/home/maglun/src/kafka/bin/zookeeper-server-start.sh'
    )  # noqa: E501

    # Three brokers
    conf = {'replication_factor': 3, 'num_partitions': 4}
    broker1 = KafkaBrokerApp(cluster, conf)
    broker2 = KafkaBrokerApp(cluster, conf)
    broker3 = KafkaBrokerApp(cluster, conf)
    bootstrap_servers = ','.join(cluster.get_all('address', '',
                                                 KafkaBrokerApp))

    print('# Deploying cluster')
    cluster.deploy()

    print('# Starting cluster')
    cluster.start()
Example No. 14
def test_version(version, cmd=None, deploy=True):
    """
    @brief Create, deploy and start a Kafka cluster using Kafka \p version
    Then run librdkafka's regression tests.
    """
    
    cluster = Cluster('librdkafkaInteractiveBrokerVersionTests', 'tmp')

    # One ZK (from Kafka repo)
    zk1 = ZookeeperApp(cluster, bin_path=kafka_path + '/bin/zookeeper-server-start.sh')
    zk_address = zk1.get('address')

    # Three brokers
    conf = {'replication_factor': 3, 'num_partitions': 4, 'version': version}
    broker1 = KafkaBrokerApp(cluster, conf, kafka_path=kafka_path)
    broker2 = KafkaBrokerApp(cluster, conf, kafka_path=kafka_path)
    broker3 = KafkaBrokerApp(cluster, conf, kafka_path=kafka_path)
    bootstrap_servers = ','.join(cluster.get_all('address', '', KafkaBrokerApp))

    # Generate test config file
    fd, test_conf_file = tempfile.mkstemp(prefix='test_conf', text=True)
    os.write(fd, ('bootstrap.servers=%s\n' % bootstrap_servers).encode('ascii'))
    if version != 'trunk':
        os.write(fd, ('broker.version=%s\n' % version).encode('ascii'))
    os.close(fd)

    if deploy:
        print('# Deploying cluster')
        cluster.deploy()
    else:
        print('# Not deploying')

    print('# Starting cluster')
    cluster.start()

    print('# Waiting for brokers to come up')

    if not cluster.wait_operational(30):
        raise TimeoutError('Cluster did not go operational')

    print('# Connect to cluster with bootstrap.servers %s' % bootstrap_servers)

    cmd_env = 'RDKAFKA_TEST_CONF=%s ZK_ADDRESS=%s KAFKA_VERSION=%s' % (test_conf_file, zk_address, version)
    if not cmd:
        cmd = 'bash --rcfile <(cat ~/.bashrc; echo \'PS1="[TRIVUP:%s@%s] \\u@\\h:\w$ "\')' % (cluster.name, version)
    subprocess.call('%s %s' % (cmd_env, cmd), shell=True, executable='/bin/bash')

    os.remove(test_conf_file)

    cluster.stop(force=True)

    cluster.cleanup(keeptypes=['log'])
    return True
Example No. 15
#!/usr/bin/env python

from trivup.trivup import Cluster
from trivup.apps.SslApp import SslApp

import subprocess
import time


if __name__ == '__main__':
    cluster = Cluster('TestCluster', 'tmp', debug=True)

    # SSL App
    ssl = SslApp(cluster)

    a = ssl.create_keystore('mybroker')
    print('created keystore: %s' % str(a))

    b = ssl.create_key('myclient')
    print('created key: %s' % str(b))

    cluster.cleanup(keeptypes=[])

Example No. 16
def test_version(version, cmd=None, deploy=True, conf={}, debug=False, exec_cnt=1,
                  root_path='tmp', broker_cnt=3, scenario='default'):
    """
    @brief Create, deploy and start a Kafka cluster using Kafka \p version
    Then run librdkafka's regression tests.
    """

    print('## Test version %s' % version)

    cluster = Cluster('LibrdkafkaTestCluster', root_path, debug=debug)

    # Enable SSL if desired
    if 'SSL' in conf.get('security.protocol', ''):
        cluster.ssl = SslApp(cluster, conf)

    # One ZK (from Kafka repo)
    zk1 = ZookeeperApp(cluster)
    zk_address = zk1.get('address')

    # Start Kerberos KDC if GSSAPI is configured
    if 'GSSAPI' in conf.get('sasl_mechanisms', []):
        KerberosKdcApp(cluster, 'MYREALM').start()

    defconf = {'version': version}
    defconf.update(conf)

    print('conf: ', defconf)

    brokers = []
    for n in range(0, broker_cnt):
        # Configure rack & replica selector if broker supports fetch-from-follower
        if version_as_number(version) >= 2.4:
            defconf.update({'conf': ['broker.rack=RACK${appid}', 'replica.selector.class=org.apache.kafka.common.replica.RackAwareReplicaSelector']})
        brokers.append(KafkaBrokerApp(cluster, defconf))

    cmd_env = os.environ.copy()

    # Generate test config file
    security_protocol='PLAINTEXT'
    fd, test_conf_file = tempfile.mkstemp(prefix='test_conf', text=True)
    os.write(fd, ('test.sql.command=sqlite3 rdktests\n').encode('ascii'))
    os.write(fd, 'broker.address.family=v4\n'.encode('ascii'))
    if version.startswith('0.9') or version.startswith('0.8'):
        os.write(fd, 'api.version.request=false\n'.encode('ascii'))
        os.write(fd, ('broker.version.fallback=%s\n' % version).encode('ascii'))
    # SASL (only one mechanism supported)
    mech = defconf.get('sasl_mechanisms', '').split(',')[0]
    if mech != '':
        os.write(fd, ('sasl.mechanisms=%s\n' % mech).encode('ascii'))
        if mech == 'PLAIN' or mech.find('SCRAM') != -1:
            print('# Writing SASL %s client config to %s' % (mech, test_conf_file))
            security_protocol='SASL_PLAINTEXT'
            # Use first user as SASL user/pass
            for up in defconf.get('sasl_users', '').split(','):
                u,p = up.split('=')
                os.write(fd, ('sasl.username=%s\n' % u).encode('ascii'))
                os.write(fd, ('sasl.password=%s\n' % p).encode('ascii'))
                break
        elif mech == 'OAUTHBEARER':
            security_protocol='SASL_PLAINTEXT'
            os.write(fd, 'enable.sasl.oauthbearer.unsecure.jwt=true\n'.encode('ascii'))
            os.write(fd, ('sasl.oauthbearer.config=%s\n' % \
                          'scope=requiredScope principal=admin').encode('ascii'))
        else:
            print('# FIXME: SASL %s client config not written to %s' % (mech, test_conf_file))

    # SSL support
    ssl = getattr(cluster, 'ssl', None)
    if ssl is not None:
        if 'SASL' in security_protocol:
            security_protocol = 'SASL_SSL'
        else:
            security_protocol = 'SSL'

        key = ssl.create_cert('librdkafka')

        os.write(fd, ('ssl.ca.location=%s\n' % ssl.ca['pem']).encode('ascii'))
        os.write(fd, ('ssl.certificate.location=%s\n' % key['pub']['pem']).encode('ascii'))
        os.write(fd, ('ssl.key.location=%s\n' % key['priv']['pem']).encode('ascii'))
        os.write(fd, ('ssl.key.password=%s\n' % key['password']).encode('ascii'))

        for k, v in ssl.ca.items():
            cmd_env['RDK_SSL_ca_{}'.format(k)] = v

        # Set envs for all generated keys so tests can find them.
        for k, v in key.items():
            if isinstance(v, dict):
                for k2, v2 in v.items():
                    # E.g. "RDK_SSL_priv_der=path/to/librdkafka-priv.der"
                    cmd_env['RDK_SSL_{}_{}'.format(k, k2)] = v2
            else:
                cmd_env['RDK_SSL_{}'.format(k)] = v


    # Define bootstrap brokers based on selected security protocol
    print('# Using client security.protocol=%s' % security_protocol)
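    # Each broker exposes its listeners as a CSV of "PROTOCOL://host:port"
    # entries; keep only those that match the selected security protocol.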
    all_listeners = (','.join(cluster.get_all('listeners', '',
                                              KafkaBrokerApp))).split(',')
    bootstrap_servers = ','.join(
        [x for x in all_listeners if x.startswith(security_protocol)])
    os.write(fd, ('bootstrap.servers=%s\n' % bootstrap_servers).encode('ascii'))
    os.write(fd, ('security.protocol=%s\n' % security_protocol).encode('ascii'))
    os.close(fd)

    if deploy:
        print('# Deploying cluster')
        cluster.deploy()
    else:
        print('# Not deploying')

    print('# Starting cluster, instance path %s' % cluster.instance_path())
    cluster.start()

    print('# Waiting for brokers to come up')

    if not cluster.wait_operational(30):
        cluster.stop(force=True)
        raise Exception('Cluster %s did not go operational, see logs in %s/%s' % \
                        (cluster.name, cluster.root_path, cluster.instance))

    print('# Connect to cluster with bootstrap.servers %s' % bootstrap_servers)

    cmd_env['KAFKA_PATH'] = brokers[0].conf.get('destdir')
    cmd_env['RDKAFKA_TEST_CONF'] = test_conf_file
    cmd_env['ZK_ADDRESS'] = zk_address
    cmd_env['BROKERS'] = bootstrap_servers
    cmd_env['TEST_KAFKA_VERSION'] = version
    cmd_env['TRIVUP_ROOT'] = cluster.instance_path()
    cmd_env['TEST_SCENARIO'] = scenario

    # Per broker env vars
    for b in [x for x in cluster.apps if isinstance(x, KafkaBrokerApp)]:
        cmd_env['BROKER_ADDRESS_%d' % b.appid] = b.conf['address']
        # Add each broker pid as an env so they can be killed individually.
        cmd_env['BROKER_PID_%d' % b.appid] = str(b.proc.pid)
        # JMX port, if available
        jmx_port = b.conf.get('jmx_port', None)
        if jmx_port is not None:
            cmd_env['BROKER_JMX_PORT_%d' % b.appid] = str(jmx_port)

    if not cmd:
        cmd_env['PS1'] = '[TRIVUP:%s@%s] \\u@\\h:\\w$ ' % (cluster.name, version)
        cmd = 'bash --rcfile <(cat ~/.bashrc)'

    ret = True

    for i in range(0, exec_cnt):
        retcode = subprocess.call(cmd, env=cmd_env, shell=True, executable='/bin/bash')
        if retcode != 0:
            print('# Command failed with returncode %d: %s' % (retcode, cmd))
            ret = False

    try:
        os.remove(test_conf_file)
    except OSError:
        pass

    cluster.stop(force=True)

    cluster.cleanup(keeptypes=['log'])
    return ret
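
The version_as_number() helper called above is not defined in this example. Below is a minimal sketch of what it might look like, followed by a hypothetical invocation of test_version(); the command, user and password are illustrative placeholders, not from the original.

def version_as_number(version):
    """Map an 'x.y.z' version string to the float x.y for comparisons.
       Minimal sketch, assuming 'trunk' should compare as newest."""
    if version == 'trunk':
        return float('inf')
    major, minor = version.split('.')[:2]
    return float('%s.%s' % (major, minor))


# Hypothetical run: one pass of the suite against Kafka 2.8.1 with SASL/PLAIN.
ok = test_version('2.8.1',
                  cmd='./run-tests.sh',
                  conf={'security.protocol': 'SASL_PLAINTEXT',
                        'sasl_mechanisms': 'PLAIN',
                        'sasl_users': 'testuser=testpass'},
                  broker_cnt=3,
                  scenario='default')
print('tests %s' % ('passed' if ok else 'failed'))
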
Example No. 17
0
#!/usr/bin/env python

from trivup.trivup import Cluster
from trivup.apps.SslApp import SslApp

if __name__ == '__main__':
    cluster = Cluster('TestCluster', 'tmp', debug=True)

    # SSL App
    ssl = SslApp(cluster)

    a = ssl.create_keystore('mybroker')
    print('created keystore: %s' % (a,))

    r = ssl.create_cert('myclient')
    print('created key: %s' % r)

    cluster.cleanup(keeptypes=[])
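
Note the cleanup difference between the standalone SSL scripts and the test_version() variants: cleanup(keeptypes=[]) removes every instance artifact, while the cluster tests finish with cleanup(keeptypes=['log']) so broker and test logs survive for post-mortem inspection.
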
Example No. 18
0
def test_version(version, cmd=None, deploy=True):
    """
    @brief Create, deploy and start a Kafka cluster using Kafka \p version,
           then run librdkafka's regression tests.
    """

    cluster = Cluster('librdkafkaInteractiveBrokerVersionTests', 'tmp')

    # One ZK (from Kafka repo)
    zk1 = ZookeeperApp(cluster,
                       bin_path=kafka_path + '/bin/zookeeper-server-start.sh')
    zk_address = zk1.get('address')

    # Three brokers
    conf = {'replication_factor': 3, 'num_partitions': 4, 'version': version}
    broker1 = KafkaBrokerApp(cluster, conf, kafka_path=kafka_path)
    broker2 = KafkaBrokerApp(cluster, conf, kafka_path=kafka_path)
    broker3 = KafkaBrokerApp(cluster, conf, kafka_path=kafka_path)
    bootstrap_servers = ','.join(cluster.get_all('address', '',
                                                 KafkaBrokerApp))

    # Generate test config file
    fd, test_conf_file = tempfile.mkstemp(prefix='test_conf', text=True)
    os.write(fd,
             ('bootstrap.servers=%s\n' % bootstrap_servers).encode('ascii'))
    if version != 'trunk':
        os.write(fd, ('broker.version=%s\n' % version).encode('ascii'))
    os.close(fd)

    if deploy:
        print('# Deploying cluster')
        cluster.deploy()
    else:
        print('# Not deploying')

    print('# Starting cluster')
    cluster.start()

    print('# Waiting for brokers to come up')

    if not cluster.wait_operational(30):
        raise TimeoutError('Cluster did not go operational')

    print('# Connect to cluster with bootstrap.servers %s' % bootstrap_servers)

    cmd_env = 'RDKAFKA_TEST_CONF=%s ZK_ADDRESS=%s KAFKA_VERSION=%s' % (
        test_conf_file, zk_address, version)
    if not cmd:
        cmd = 'bash --rcfile <(cat ~/.bashrc; echo \'PS1="[TRIVUP:%s@%s] \\u@\\h:\\w$ "\')' % (
            cluster.name, version)
    subprocess.call('%s %s' % (cmd_env, cmd),
                    shell=True,
                    executable='/bin/bash')

    os.remove(test_conf_file)

    cluster.stop(force=True)

    cluster.cleanup(keeptypes=['log'])
    return True
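
This example (like No. 19 below) exports the test variables by prefixing the shell command with inline assignments. Example No. 16 above instead builds an environment dict and hands it to subprocess, which avoids shell-quoting problems when values contain spaces. A minimal sketch of the dict style, reusing variable names from the examples:

import os
import subprocess

# Copy the parent environment and add the test variables explicitly;
# passing env= to subprocess.call() sidesteps quoting issues entirely.
cmd_env = os.environ.copy()
cmd_env['RDKAFKA_TEST_CONF'] = test_conf_file
cmd_env['ZK_ADDRESS'] = zk_address
cmd_env['TEST_KAFKA_VERSION'] = version
subprocess.call(cmd, env=cmd_env, shell=True, executable='/bin/bash')
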
Example No. 19
0
def test_version(version,
                 cmd=None,
                 deploy=True,
                 conf={},
                 debug=False,
                 exec_cnt=1,
                 root_path='tmp',
                 broker_cnt=3):
    """
    @brief Create, deploy and start a Kafka cluster using Kafka \p version
    Then run librdkafka's regression tests.
    """

    print('## Test version %s' % version)

    cluster = Cluster('LibrdkafkaTestCluster', root_path, debug=debug)

    # Enable SSL if desired
    if 'SSL' in conf.get('security.protocol', ''):
        cluster.ssl = SslApp(cluster, conf)

    # One ZK (from Kafka repo)
    zk1 = ZookeeperApp(cluster)
    zk_address = zk1.get('address')

    # Start Kerberos KDC if GSSAPI is configured
    if 'GSSAPI' in conf.get('sasl_mechanisms', ''):
        KerberosKdcApp(cluster, 'MYREALM').start()

    defconf = {
        'replication_factor': min(broker_cnt, 3),
        'num_partitions': 4,
        'version': version
    }
    defconf.update(conf)

    print('conf: ', defconf)

    brokers = []
    for n in range(0, broker_cnt):
        brokers.append(KafkaBrokerApp(cluster, defconf))

    # Generate test config file
    security_protocol = 'PLAINTEXT'
    fd, test_conf_file = tempfile.mkstemp(prefix='test_conf', text=True)
    os.write(fd, ('test.sql.command=sqlite3 rdktests\n').encode('ascii'))
    os.write(fd, 'broker.address.family=v4\n'.encode('ascii'))
    if version != 'trunk':
        os.write(fd,
                 ('broker.version.fallback=%s\n' % version).encode('ascii'))
    else:
        os.write(fd, 'api.version.request=true\n'.encode('ascii'))
    # SASL (only one mechanism supported)
    mech = defconf.get('sasl_mechanisms', '').split(',')[0]
    if mech != '':
        os.write(fd, ('sasl.mechanisms=%s\n' % mech).encode('ascii'))
        if mech == 'PLAIN' or 'SCRAM' in mech:
            print('# Writing SASL %s client config to %s' %
                  (mech, test_conf_file))
            security_protocol = 'SASL_PLAINTEXT'
            # Use first user as SASL user/pass
            for up in defconf.get('sasl_users', '').split(','):
                u, p = up.split('=')
                os.write(fd, ('sasl.username=%s\n' % u).encode('ascii'))
                os.write(fd, ('sasl.password=%s\n' % p).encode('ascii'))
                break
        else:
            print('# FIXME: SASL %s client config not written to %s' %
                  (mech, test_conf_file))

    # SSL support
    ssl = getattr(cluster, 'ssl', None)
    if ssl is not None:
        if 'SASL' in security_protocol:
            security_protocol = 'SASL_SSL'
        else:
            security_protocol = 'SSL'

        key, req, pem = ssl.create_key('librdkafka')

        os.write(fd, ('ssl.ca.location=%s\n' % ssl.ca_cert).encode('ascii'))
        os.write(fd, ('ssl.certificate.location=%s\n' % pem).encode('ascii'))
        os.write(fd, ('ssl.key.location=%s\n' % key).encode('ascii'))
        os.write(fd, ('ssl.key.password=%s\n' %
                      ssl.conf.get('ssl_key_pass')).encode('ascii'))

    # Define bootstrap brokers based on selected security protocol
    print('# Using client security.protocol=%s' % security_protocol)
    all_listeners = (','.join(cluster.get_all('listeners', '',
                                              KafkaBrokerApp))).split(',')
    bootstrap_servers = ','.join(
        [x for x in all_listeners if x.startswith(security_protocol)])
    os.write(fd,
             ('bootstrap.servers=%s\n' % bootstrap_servers).encode('ascii'))
    os.write(fd,
             ('security.protocol=%s\n' % security_protocol).encode('ascii'))
    os.close(fd)

    if deploy:
        print('# Deploying cluster')
        cluster.deploy()
    else:
        print('# Not deploying')

    print('# Starting cluster, instance path %s' % cluster.instance_path())
    cluster.start()

    print('# Waiting for brokers to come up')

    if not cluster.wait_operational(30):
        cluster.stop(force=True)
        raise Exception('Cluster %s did not go operational, see logs in %s/%s' % \
                        (cluster.name, cluster.root_path, cluster.instance))

    print('# Connect to cluster with bootstrap.servers %s' % bootstrap_servers)

    cmd_env = 'export KAFKA_PATH="%s" RDKAFKA_TEST_CONF="%s" ZK_ADDRESS="%s" BROKERS="%s" TEST_KAFKA_VERSION="%s" TRIVUP_ROOT="%s"; ' % \
              (brokers[0].conf.get('destdir'), test_conf_file, zk_address, bootstrap_servers, version, cluster.instance_path())
    if not cmd:
        cmd = 'bash --rcfile <(cat ~/.bashrc; echo \'PS1="[TRIVUP:%s@%s] \\u@\\h:\\w$ "\')' % (
            cluster.name, version)
    for i in range(0, exec_cnt):
        subprocess.call('%s %s' % (cmd_env, cmd),
                        shell=True,
                        executable='/bin/bash')

    try:
        os.remove(test_conf_file)
    except OSError:
        pass

    cluster.stop(force=True)

    cluster.cleanup(keeptypes=['log'])
    return True
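
Both test_version() variants create the client config with tempfile.mkstemp() and remove it in a try/except after the run. A sketch of an alternative (not from the originals) that guarantees removal even if the run raises midway:

import os
import tempfile

fd, test_conf_file = tempfile.mkstemp(prefix='test_conf', text=True)
try:
    os.write(fd, 'broker.address.family=v4\n'.encode('ascii'))
    os.close(fd)
    # ... deploy the cluster and run the test command as in the examples ...
finally:
    os.remove(test_conf_file)
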
Example No. 20
0
#!/usr/bin/env python

from trivup.trivup import Cluster
from trivup.apps.ZookeeperApp import ZookeeperApp
from trivup.apps.KafkaBrokerApp import KafkaBrokerApp

import subprocess
import time


if __name__ == '__main__':
    cluster = Cluster('TestCluster', 'tmp')

    # One ZK
    zk1 = ZookeeperApp(cluster, bin_path='/home/maglun/src/kafka/bin/zookeeper-server-start.sh')

    # Three brokers
    conf = {'replication_factor': 3, 'num_partitions': 4}
    broker1 = KafkaBrokerApp(cluster, conf, kafka_path='/home/maglun/src/kafka')
    broker2 = KafkaBrokerApp(cluster, conf, kafka_path='/home/maglun/src/kafka')
    broker3 = KafkaBrokerApp(cluster, conf, kafka_path='/home/maglun/src/kafka')
    bootstrap_servers = ','.join(cluster.get_all('address', '', KafkaBrokerApp))

    print('# Deploying cluster')
    cluster.deploy()

    print('# Starting cluster')
    cluster.start()

    print('# Waiting for brokers to come up')