def __init__(self, version, conf={}, num_brokers=3, kafka_path=None,
             debug=False):
    """
    @brief Create, deploy and start a Kafka cluster using Kafka \\p version

    Supported \\p conf keys:
     * security.protocol - PLAINTEXT, SASL_PLAINTEXT, SASL_SSL

    \\p conf dict is passed to KafkaBrokerApp classes, etc.
    """

    super(LibrdkafkaTestCluster, self).__init__(self.__class__.__name__,
                                                'tmp', debug=debug)

    self.brokers = list()

    # One ZK (from Kafka repo)
    ZookeeperApp(self,
                 bin_path=kafka_path + '/bin/zookeeper-server-start.sh')

    # Start Kerberos KDC if GSSAPI (Kerberos) is configured
    if 'GSSAPI' in conf.get('sasl_mechanisms', []):
        kdc = KerberosKdcApp(self, 'MYREALM')
        # Kerberos needs to be started prior to Kafka so that principals
        # and keytabs are available at the time of Kafka config generation.
        kdc.start()

    # Brokers
    defconf = {'replication_factor': min(num_brokers, 3),
               'num_partitions': 4,
               'version': version,
               'security.protocol': 'PLAINTEXT'}
    defconf.update(conf)
    self.conf = defconf

    for n in range(0, num_brokers):
        self.brokers.append(KafkaBrokerApp(self, defconf,
                                           kafka_path=kafka_path))
def __init__(self, version, conf={}, num_brokers=3, debug=False,
             scenario="default"):
    """
    @brief Create, deploy and start a Kafka cluster using Kafka \\p version

    Supported \\p conf keys:
     * security.protocol - PLAINTEXT, SASL_PLAINTEXT, SASL_SSL

    \\p conf dict is passed to KafkaBrokerApp classes, etc.
    """

    super(LibrdkafkaTestCluster, self).__init__(
        self.__class__.__name__,
        os.environ.get('TRIVUP_ROOT', 'tmp'), debug=debug)

    # Read trivup config from scenario definition.
    defconf = read_scenario_conf(scenario)
    defconf.update(conf)

    # Enable SSL if desired
    if 'SSL' in conf.get('security.protocol', ''):
        self.ssl = SslApp(self, defconf)

    self.brokers = list()

    # One ZK (from Kafka repo)
    ZookeeperApp(self)

    # Start Kerberos KDC if GSSAPI (Kerberos) is configured
    if 'GSSAPI' in defconf.get('sasl_mechanisms', []):
        kdc = KerberosKdcApp(self, 'MYREALM')
        # Kerberos needs to be started prior to Kafka so that principals
        # and keytabs are available at the time of Kafka config generation.
        kdc.start()

    # Brokers
    defconf.update({'replication_factor': min(num_brokers, 3),
                    'version': version,
                    'security.protocol': 'PLAINTEXT'})
    self.conf = defconf

    for n in range(0, num_brokers):
        # Configure rack & replica selector if broker supports
        # fetch-from-follower
        if version_as_list(version) >= [2, 4, 0]:
            defconf.update(
                {'conf': [
                    'broker.rack=RACK${appid}',
                    'replica.selector.class=org.apache.kafka.common.replica.RackAwareReplicaSelector']})  # noqa: E501
        self.brokers.append(KafkaBrokerApp(self, defconf))
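# The constructor above calls two helpers that are not defined in this
# excerpt: read_scenario_conf() and version_as_list(). The sketches below
# are assumptions about their behavior, not the actual implementations;
# the 'scenarios/' directory layout in particular is hypothetical.
import json
import os
import sys


def read_scenario_conf(scenario):
    """ Sketch: load the trivup conf dict for the named scenario,
        assuming one JSON file per scenario under 'scenarios/'. """
    path = os.path.join('scenarios', scenario + '.json')
    if not os.path.exists(path):
        return {}
    with open(path) as f:
        return json.load(f)


def version_as_list(version):
    """ Sketch: convert '2.4.0' to [2, 4, 0] for element-wise
        comparison, e.g. version_as_list(version) >= [2, 4, 0].
        Assumes a plain dotted release string (or 'trunk'). """
    if version == 'trunk':
        return [sys.maxsize]  # trunk sorts after any numbered release
    return [int(x) for x in version.split('.')]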
def __init__(self, version, conf={}, num_brokers=3, kafka_path=None,
             debug=False):
    """
    @brief Create, deploy and start a Kafka cluster using Kafka \\p version

    Supported \\p conf keys:
     * security.protocol - PLAINTEXT, SASL_PLAINTEXT, SASL_SSL

    \\p conf dict is passed to KafkaBrokerApp classes, etc.
    """

    super(LibrdkafkaTestCluster, self).__init__(self.__class__.__name__,
                                                'tmp', debug=debug)

    # Enable SSL if desired
    if 'SSL' in conf.get('security.protocol', ''):
        self.ssl = SslApp(self, conf)

    self.brokers = list()

    # One ZK (from Kafka repo)
    ZookeeperApp(self,
                 bin_path=kafka_path + '/bin/zookeeper-server-start.sh')

    # Start Kerberos KDC if GSSAPI (Kerberos) is configured
    if 'GSSAPI' in conf.get('sasl_mechanisms', []):
        kdc = KerberosKdcApp(self, 'MYREALM')
        # Kerberos needs to be started prior to Kafka so that principals
        # and keytabs are available at the time of Kafka config generation.
        kdc.start()

    # Brokers
    defconf = {'replication_factor': min(num_brokers, 3),
               'num_partitions': 4,
               'version': version,
               'security.protocol': 'PLAINTEXT'}
    defconf.update(conf)
    self.conf = defconf

    for n in range(0, num_brokers):
        self.brokers.append(
            KafkaBrokerApp(self, defconf, kafka_path=kafka_path))
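# Usage sketch for the constructors above (values are illustrative;
# kafka_path must point at a Kafka source checkout for the variants
# that take it):
#
#   cluster = LibrdkafkaTestCluster('2.2.0',
#                                   conf={'sasl_mechanisms': 'GSSAPI'},
#                                   num_brokers=3,
#                                   kafka_path='/path/to/kafka')
#   cluster.deploy()
#   cluster.start()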
def _setup_kerberos(self):
    """ Set up Kerberos KDCs """

    # Create KDCs for each realm.
    # First realm will be the default / broker realm.
    #
    realm_cnt = int(self.conf.get('realm_cnt', 1))
    # No point in having more than two realms
    assert realm_cnt > 0 and realm_cnt < 3
    realms = ['REALM{}.TRIVUP'.format(x + 1) for x in range(0, realm_cnt)]

    # Pre-allocate ports for the KDCs so they can reference each other
    # in the krb5.conf configuration.
    kdc_ports = {x: TcpPortAllocator(
        self.cluster).next("dummy") for x in realms}

    # Set up realm=kdc:port cross-realm mappings
    cross_realms = ",".join(["{}={}:{}".format(
        x, self.cluster.get_node().name, kdc_ports[x]) for x in realms])

    kdcs = dict()
    for realm in realms:
        kdc = KerberosKdcApp(
            self.cluster, realm,
            conf={'port': kdc_ports[realm],
                  'cross_realms': cross_realms,
                  'renew_lifetime':
                  int(self.conf.get('krb_renew_lifetime')),
                  'ticket_lifetime':
                  int(self.conf.get('krb_ticket_lifetime'))})
        # Kerberos needs to be started prior to Kafka so that principals
        # and keytabs are available at the time of Kafka config generation.
        kdc.start()
        kdcs[realm] = kdc

    self.broker_realm = realms[0]
    self.client_realm = realms[-1]
    self.broker_kdc = kdcs[self.broker_realm]
    self.client_kdc = kdcs[self.client_realm]

    # Add cross-realm TGTs
    if realm_cnt > 1:
        KerberosKdcApp.add_cross_realm_tgts(kdcs)

    # Add client envs and configuration
    self.env['KRB5CCNAME'] = self.client_kdc.mkpath('krb5cc')
    self.env['KRB5_CONFIG'] = self.client_kdc.conf['krb5_conf']
    self.env['KRB5_KDC_PROFILE'] = self.client_kdc.conf['kdc_conf']
    principal, keytab = self.client_kdc.add_principal('admin')
    self._client_conf['sasl.kerberos.keytab'] = keytab
    self._client_conf['sasl.kerberos.principal'] = principal.split('@')[0]
    # Refresh ticket 60s before renew timeout.
    self._client_conf['sasl.kerberos.min.time.before.relogin'] = \
        max(1, int(self.conf.get('krb_renew_lifetime')) - 60) * 1000
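# Worked example of the cross-realm mapping built above (illustrative
# values only): with realm_cnt=2, a node named 'localhost' and allocated
# KDC ports 45001 and 45002, each generated krb5.conf can resolve every
# realm's KDC via:
#
#   cross_realms == 'REALM1.TRIVUP=localhost:45001,REALM2.TRIVUP=localhost:45002'
#
# The first realm (REALM1.TRIVUP) becomes the broker realm and the last
# one the client realm, which is why the cross-realm TGTs are needed.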
def test_version(version, cmd=None, deploy=True, conf={}, debug=False,
                 exec_cnt=1, root_path='tmp', broker_cnt=3):
    """
    @brief Create, deploy and start a Kafka cluster using Kafka \\p version
    Then run librdkafka's regression tests.
    """

    print('## Test version %s' % version)

    cluster = Cluster('LibrdkafkaTestCluster', root_path, debug=debug)

    # Enable SSL if desired
    if 'SSL' in conf.get('security.protocol', ''):
        cluster.ssl = SslApp(cluster, conf)

    # One ZK (from Kafka repo)
    zk1 = ZookeeperApp(cluster)
    zk_address = zk1.get('address')

    # Start Kerberos KDC if GSSAPI is configured
    if 'GSSAPI' in conf.get('sasl_mechanisms', []):
        KerberosKdcApp(cluster, 'MYREALM').start()

    defconf = {'replication_factor': min(broker_cnt, 3),
               'num_partitions': 4,
               'version': version}
    defconf.update(conf)

    print('conf: ', defconf)

    brokers = []
    for n in range(0, broker_cnt):
        brokers.append(KafkaBrokerApp(cluster, defconf))

    # Generate test config file
    security_protocol = 'PLAINTEXT'
    fd, test_conf_file = tempfile.mkstemp(prefix='test_conf', text=True)
    os.write(fd, ('test.sql.command=sqlite3 rdktests\n').encode('ascii'))
    os.write(fd, 'broker.address.family=v4\n'.encode('ascii'))
    if version != 'trunk':
        os.write(fd, ('broker.version.fallback=%s\n' %
                      version).encode('ascii'))
    else:
        os.write(fd, 'api.version.request=true\n'.encode('ascii'))

    # SASL (only one mechanism supported)
    mech = defconf.get('sasl_mechanisms', '').split(',')[0]
    if mech != '':
        os.write(fd, ('sasl.mechanisms=%s\n' % mech).encode('ascii'))
        if mech == 'PLAIN' or mech.find('SCRAM') != -1:
            print('# Writing SASL %s client config to %s' %
                  (mech, test_conf_file))
            security_protocol = 'SASL_PLAINTEXT'
            # Use first user as SASL user/pass
            for up in defconf.get('sasl_users', '').split(','):
                u, p = up.split('=')
                os.write(fd, ('sasl.username=%s\n' % u).encode('ascii'))
                os.write(fd, ('sasl.password=%s\n' % p).encode('ascii'))
                break
        else:
            print('# FIXME: SASL %s client config not written to %s' %
                  (mech, test_conf_file))

    # SSL support
    ssl = getattr(cluster, 'ssl', None)
    if ssl is not None:
        if 'SASL' in security_protocol:
            security_protocol = 'SASL_SSL'
        else:
            security_protocol = 'SSL'

        key, req, pem = ssl.create_key('librdkafka')

        os.write(fd, ('ssl.ca.location=%s\n' % ssl.ca_cert).encode('ascii'))
        os.write(fd, ('ssl.certificate.location=%s\n' % pem).encode('ascii'))
        os.write(fd, ('ssl.key.location=%s\n' % key).encode('ascii'))
        os.write(fd, ('ssl.key.password=%s\n' %
                      ssl.conf.get('ssl_key_pass')).encode('ascii'))

    # Define bootstrap brokers based on selected security protocol
    print('# Using client security.protocol=%s' % security_protocol)
    all_listeners = (','.join(cluster.get_all(
        'listeners', '', KafkaBrokerApp))).split(',')
    bootstrap_servers = ','.join(
        [x for x in all_listeners if x.startswith(security_protocol)])
    os.write(fd, ('bootstrap.servers=%s\n' %
                  bootstrap_servers).encode('ascii'))
    os.write(fd, ('security.protocol=%s\n' %
                  security_protocol).encode('ascii'))
    os.close(fd)

    if deploy:
        print('# Deploying cluster')
        cluster.deploy()
    else:
        print('# Not deploying')

    print('# Starting cluster, instance path %s' % cluster.instance_path())
    cluster.start()

    print('# Waiting for brokers to come up')

    if not cluster.wait_operational(30):
        cluster.stop(force=True)
        raise Exception(
            'Cluster %s did not go operational, see logs in %s/%s' %
            (cluster.name, cluster.root_path, cluster.instance))

    print('# Connect to cluster with bootstrap.servers %s' %
          bootstrap_servers)

    cmd_env = ('export KAFKA_PATH="%s" RDKAFKA_TEST_CONF="%s" '
               'ZK_ADDRESS="%s" BROKERS="%s" TEST_KAFKA_VERSION="%s" '
               'TRIVUP_ROOT="%s"; ') % \
        (brokers[0].conf.get('destdir'), test_conf_file, zk_address,
         bootstrap_servers, version, cluster.instance_path())

    if not cmd:
        cmd = ('bash --rcfile <(cat ~/.bashrc; '
               'echo \'PS1="[TRIVUP:%s@%s] \\u@\\h:\\w$ "\')') % (
            cluster.name, version)

    for i in range(0, exec_cnt):
        subprocess.call('%s %s' % (cmd_env, cmd), shell=True,
                        executable='/bin/bash')

    try:
        os.remove(test_conf_file)
    except OSError:
        pass

    cluster.stop(force=True)
    cluster.cleanup(keeptypes=['log'])

    return True
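# Example invocation of test_version() above -- a sketch, assuming the
# trivup apps are importable and a Kafka checkout is deployable. The
# version string and SASL settings are illustrative, not required:
#
#   test_version('2.2.0',
#                conf={'sasl_mechanisms': 'PLAIN',
#                      'sasl_users': 'testuser=testpass'},
#                broker_cnt=3)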
def test_version(version, cmd=None, deploy=True, conf={}, debug=False,
                 exec_cnt=1, root_path='tmp', broker_cnt=3,
                 scenario='default'):
    """
    @brief Create, deploy and start a Kafka cluster using Kafka \\p version
    Then run librdkafka's regression tests.
    """

    print('## Test version %s' % version)

    cluster = Cluster('LibrdkafkaTestCluster', root_path, debug=debug)

    # Enable SSL if desired
    if 'SSL' in conf.get('security.protocol', ''):
        cluster.ssl = SslApp(cluster, conf)

    # One ZK (from Kafka repo)
    zk1 = ZookeeperApp(cluster)
    zk_address = zk1.get('address')

    # Start Kerberos KDC if GSSAPI is configured
    if 'GSSAPI' in conf.get('sasl_mechanisms', []):
        KerberosKdcApp(cluster, 'MYREALM').start()

    defconf = {'version': version}
    defconf.update(conf)

    print('conf: ', defconf)

    brokers = []
    for n in range(0, broker_cnt):
        # Configure rack & replica selector if broker supports
        # fetch-from-follower
        if version_as_number(version) >= 2.4:
            defconf.update(
                {'conf': [
                    'broker.rack=RACK${appid}',
                    'replica.selector.class=org.apache.kafka.common.replica.RackAwareReplicaSelector']})  # noqa: E501
        brokers.append(KafkaBrokerApp(cluster, defconf))

    cmd_env = os.environ.copy()

    # Generate test config file
    security_protocol = 'PLAINTEXT'
    fd, test_conf_file = tempfile.mkstemp(prefix='test_conf', text=True)
    os.write(fd, ('test.sql.command=sqlite3 rdktests\n').encode('ascii'))
    os.write(fd, 'broker.address.family=v4\n'.encode('ascii'))
    if version.startswith('0.9') or version.startswith('0.8'):
        os.write(fd, 'api.version.request=false\n'.encode('ascii'))
        os.write(fd, ('broker.version.fallback=%s\n' %
                      version).encode('ascii'))

    # SASL (only one mechanism supported)
    mech = defconf.get('sasl_mechanisms', '').split(',')[0]
    if mech != '':
        os.write(fd, ('sasl.mechanisms=%s\n' % mech).encode('ascii'))
        if mech == 'PLAIN' or mech.find('SCRAM') != -1:
            print('# Writing SASL %s client config to %s' %
                  (mech, test_conf_file))
            security_protocol = 'SASL_PLAINTEXT'
            # Use first user as SASL user/pass
            for up in defconf.get('sasl_users', '').split(','):
                u, p = up.split('=')
                os.write(fd, ('sasl.username=%s\n' % u).encode('ascii'))
                os.write(fd, ('sasl.password=%s\n' % p).encode('ascii'))
                break
        elif mech == 'OAUTHBEARER':
            security_protocol = 'SASL_PLAINTEXT'
            os.write(fd, ('enable.sasl.oauthbearer.unsecure.jwt=true\n'
                          ).encode('ascii'))
            os.write(fd, ('sasl.oauthbearer.config=%s\n' %
                          'scope=requiredScope principal=admin'
                          ).encode('ascii'))
        else:
            print('# FIXME: SASL %s client config not written to %s' %
                  (mech, test_conf_file))

    # SSL support
    ssl = getattr(cluster, 'ssl', None)
    if ssl is not None:
        if 'SASL' in security_protocol:
            security_protocol = 'SASL_SSL'
        else:
            security_protocol = 'SSL'

        key = ssl.create_cert('librdkafka')

        os.write(fd, ('ssl.ca.location=%s\n' % ssl.ca['pem']).encode('ascii'))
        os.write(fd, ('ssl.certificate.location=%s\n' %
                      key['pub']['pem']).encode('ascii'))
        os.write(fd, ('ssl.key.location=%s\n' %
                      key['priv']['pem']).encode('ascii'))
        os.write(fd, ('ssl.key.password=%s\n' %
                      key['password']).encode('ascii'))

        for k, v in ssl.ca.items():
            cmd_env['RDK_SSL_ca_{}'.format(k)] = v

        # Set envs for all generated keys so tests can find them.
        for k, v in key.items():
            if type(v) is dict:
                for k2, v2 in v.items():
                    # E.g. "RDK_SSL_priv_der=path/to/librdkafka-priv.der"
                    cmd_env['RDK_SSL_{}_{}'.format(k, k2)] = v2
            else:
                cmd_env['RDK_SSL_{}'.format(k)] = v

    # Define bootstrap brokers based on selected security protocol
    print('# Using client security.protocol=%s' % security_protocol)
    all_listeners = (','.join(cluster.get_all(
        'listeners', '', KafkaBrokerApp))).split(',')
    bootstrap_servers = ','.join(
        [x for x in all_listeners if x.startswith(security_protocol)])
    os.write(fd, ('bootstrap.servers=%s\n' %
                  bootstrap_servers).encode('ascii'))
    os.write(fd, ('security.protocol=%s\n' %
                  security_protocol).encode('ascii'))
    os.close(fd)

    if deploy:
        print('# Deploying cluster')
        cluster.deploy()
    else:
        print('# Not deploying')

    print('# Starting cluster, instance path %s' % cluster.instance_path())
    cluster.start()

    print('# Waiting for brokers to come up')

    if not cluster.wait_operational(30):
        cluster.stop(force=True)
        raise Exception(
            'Cluster %s did not go operational, see logs in %s/%s' %
            (cluster.name, cluster.root_path, cluster.instance))

    print('# Connect to cluster with bootstrap.servers %s' %
          bootstrap_servers)

    cmd_env['KAFKA_PATH'] = brokers[0].conf.get('destdir')
    cmd_env['RDKAFKA_TEST_CONF'] = test_conf_file
    cmd_env['ZK_ADDRESS'] = zk_address
    cmd_env['BROKERS'] = bootstrap_servers
    cmd_env['TEST_KAFKA_VERSION'] = version
    cmd_env['TRIVUP_ROOT'] = cluster.instance_path()
    cmd_env['TEST_SCENARIO'] = scenario

    # Per-broker env vars
    for b in [x for x in cluster.apps if isinstance(x, KafkaBrokerApp)]:
        cmd_env['BROKER_ADDRESS_%d' % b.appid] = b.conf['address']
        # Add each broker pid as an env so they can be killed individually.
        cmd_env['BROKER_PID_%d' % b.appid] = str(b.proc.pid)
        # JMX port, if available
        jmx_port = b.conf.get('jmx_port', None)
        if jmx_port is not None:
            cmd_env['BROKER_JMX_PORT_%d' % b.appid] = str(jmx_port)

    if not cmd:
        cmd_env['PS1'] = '[TRIVUP:%s@%s] \\u@\\h:\\w$ ' % (
            cluster.name, version)
        cmd = 'bash --rcfile <(cat ~/.bashrc)'

    ret = True

    for i in range(0, exec_cnt):
        retcode = subprocess.call(cmd, env=cmd_env, shell=True,
                                  executable='/bin/bash')
        if retcode != 0:
            print('# Command failed with returncode %d: %s' %
                  (retcode, cmd))
            ret = False

    try:
        os.remove(test_conf_file)
    except OSError:
        pass

    cluster.stop(force=True)
    cluster.cleanup(keeptypes=['log'])
    return ret
def test_kerberos_cross_realm():
    """ Test Kerberos cross-realm trusts """
    topic = "test"

    cluster = Cluster('KafkaCluster',
                      root_path=os.environ.get('TRIVUP_ROOT', 'tmp'),
                      debug=True)

    ZookeeperApp(cluster)

    #
    # Create KDCs for each realm.
    # First realm will be the default / broker realm.
    #
    realm_cnt = 2
    realms = ["REALM{}.COM".format(x + 1) for x in range(0, realm_cnt)]

    # Pre-allocate ports for the KDCs so they can reference each other
    # in the krb5.conf configuration.
    kdc_ports = {x: TcpPortAllocator(cluster).next("dummy") for x in realms}

    # Set up realm=kdc:port cross-realm mappings
    cross_realms = ",".join(["{}={}:{}".format(
        x, cluster.get_node().name, kdc_ports[x]) for x in realms])

    kdcs = dict()
    for realm in realms:
        kdc = KerberosKdcApp(cluster, realm,
                             conf={'port': kdc_ports[realm],
                                   'cross_realms': cross_realms,
                                   'renew_lifetime': '30',
                                   'ticket_lifetime': '120'})
        kdc.start()
        kdcs[realm] = kdc

    broker_realm = realms[0]
    client_realm = realms[1]
    broker_kdc = kdcs[broker_realm]
    client_kdc = kdcs[client_realm]

    # Create broker_cnt brokers
    broker_cnt = 4
    brokerconf = {'replication_factor': min(3, int(broker_cnt)),
                  'num_partitions': broker_cnt * 2,
                  'version': '2.2.0',
                  'sasl_mechanisms': 'GSSAPI',
                  'realm': broker_realm,
                  'conf': ['connections.max.idle.ms=60000']}

    brokers = dict()
    for n in range(0, broker_cnt):
        broker = KafkaBrokerApp(cluster, brokerconf)
        brokers[broker.appid] = broker

    # Get bootstrap server list
    security_protocol = 'SASL_PLAINTEXT'
    all_listeners = (','.join(cluster.get_all(
        'listeners', '', KafkaBrokerApp))).split(',')
    bootstrap_servers = ','.join([x for x in all_listeners
                                  if x.startswith(security_protocol)])

    assert len(bootstrap_servers) > 0, "no bootstrap servers"

    print("## Deploying cluster")
    cluster.deploy()
    print("## Starting cluster")
    cluster.start(timeout=30)

    # Add cross-realm TGTs
    for realm in realms:
        for crealm in [x for x in realms if x != realm]:
            kdcs[realm].execute(
                'kadmin.local -d "{}" -q "addprinc -requires_preauth '
                '-pw password krbtgt/{}@{}"'.format(
                    kdcs[realm].conf.get('dbpath'), crealm, realm)).wait()
            kdcs[realm].execute(
                'kadmin.local -d "{}" -q "addprinc -requires_preauth '
                '-pw password krbtgt/{}@{}"'.format(
                    kdcs[realm].conf.get('dbpath'), realm, crealm)).wait()

    # Create client base configuration
    client_config = {
        'bootstrap.servers': bootstrap_servers,
        'enable.sparse.connections': False,
        'broker.address.family': 'v4',
        'sasl.mechanisms': 'GSSAPI',
        'security.protocol': security_protocol,
        'debug': 'broker,security'
    }

    os.environ['KRB5CCNAME'] = client_kdc.mkpath('krb5cc')
    os.environ['KRB5_CONFIG'] = client_kdc.conf['krb5_conf']
    os.environ['KRB5_KDC_PROFILE'] = client_kdc.conf['kdc_conf']

    principal, keytab = client_kdc.add_principal("admin")
    client_config['sasl.kerberos.keytab'] = keytab
    client_config['sasl.kerberos.principal'] = principal.split('@')[0]
    client_config['sasl.kerberos.min.time.before.relogin'] = 120 * 1000 * 3

    print(client_config)

    print("bootstraps: {}".format(client_config['bootstrap.servers']))
    p = Producer(client_config)

    time.sleep(10)
    for n in range(1, 100):
        p.produce(topic, "msg #{}".format(n))

    p.poll(1.0)

    p.flush(1.0)

    print("####### {} messages remaining\n\n\n".format(len(p)))

    start = time.time()
    end = start + (90 * 60)
    until = start + (12 * 60)
    while time.time() < end:
        now = time.time()
        if until < now:
            print("### Producing 2 messages")
            for n in range(0, 2):
                p.produce(topic, "msg #{}".format(n))
            until = now + (12 * 60)

        p.poll(1.0)

    del p

    cluster.stop()
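# Optional verification sketch (not part of the original test): read back
# a few of the produced messages with the same Kerberos client config.
# Assumes confluent_kafka.Consumer is available alongside Producer; the
# group.id value is arbitrary.
def verify_messages(client_config, topic, expected=5):
    from confluent_kafka import Consumer
    conf = dict(client_config)
    conf.update({'group.id': 'cross-realm-check',
                 'auto.offset.reset': 'earliest'})
    c = Consumer(conf)
    c.subscribe([topic])
    seen = 0
    while seen < expected:
        msg = c.poll(10.0)
        if msg is None:
            break  # no more messages within the timeout
        if msg.error() is None:
            seen += 1
    c.close()
    return seen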