from trivup.trivup import Cluster
from trivup.apps.SslApp import SslApp


def test_Ssl():
    cluster = Cluster('TestCluster', 'tmp', debug=True)

    # SSL App
    ssl = SslApp(cluster)

    a = ssl.create_keystore('mybroker')
    print('created keystore: %s' % (a, ))

    r = ssl.create_cert('myclient')
    print('created key: %s' % r)

    r = ssl.create_cert('selfsigned_myclient', with_ca=False)
    print('created key: %s' % r)

    r = ssl.create_cert('intermediate_myclient', through_intermediate=True)
    print('created key: %s' % r)

    r = ssl.create_cert('selfsigned_intermediate_myclient',
                        with_ca=False, through_intermediate=True)
    print('created key: %s' % r)

    cluster.cleanup()

#!/usr/bin/env python

from trivup.trivup import Cluster
from trivup.apps.SslApp import SslApp


if __name__ == '__main__':
    cluster = Cluster('TestCluster', 'tmp', debug=True)

    # SSL App
    ssl = SslApp(cluster)

    a = ssl.create_keystore('mybroker')
    print('created keystore: %s' % a)

    r = ssl.create_cert('myclient')
    print('created key: %s' % r)

    cluster.cleanup(keeptypes=[])
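
# A minimal sketch (not part of trivup itself) showing how the dict returned
# by SslApp.create_cert() can be mapped to librdkafka SSL properties. It
# assumes the same key layout that KafkaCluster._setup_client_conf() relies
# on further below: key['pub']['pem'], key['priv']['pem'], key['password']
# and ssl.ca['pem']. The function name is illustrative only.
def example_client_ssl_conf(ssl, name='myclient'):
    """ Return a librdkafka-style SSL configuration dict for a new cert. """
    key = ssl.create_cert(name)
    return {
        'security.protocol': 'SSL',
        'ssl.ca.location': ssl.ca['pem'],
        'ssl.certificate.location': key['pub']['pem'],
        'ssl.key.location': key['priv']['pem'],
        'ssl.key.password': key['password'],
    }
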
# NOTE: import paths below follow the trivup package layout used in the
# SslApp example above.
from copy import deepcopy

from trivup.trivup import Cluster, Allocator, TcpPortAllocator
from trivup.apps.ZookeeperApp import ZookeeperApp
from trivup.apps.KafkaBrokerApp import KafkaBrokerApp
from trivup.apps.KerberosKdcApp import KerberosKdcApp
from trivup.apps.SslApp import SslApp
from trivup.apps.SchemaRegistryApp import SchemaRegistryApp
from trivup.apps.OauthbearerOIDCApp import OauthbearerOIDCApp

import os
import copy
import subprocess


class KafkaCluster(object):
    # conf dict structure with defaults:
    # commented-out fields are not defaults but show what is available.
    default_conf = {
        'version': '2.8.0',     # Apache Kafka version
        'cp_version': '6.1.0',  # Confluent Platform version (for SR)
        'broker_cnt': 3,
        'sasl_mechanism': '',   # GSSAPI, PLAIN, SCRAM-.., ...
        'realm_cnt': 1,
        'krb_renew_lifetime': 30,
        'krb_ticket_lifetime': 120,
        # KRaft (no ZooKeeper). Requires AK >= 2.8
        'kraft': False,
        # SASL PLAIN/SCRAM
        'sasl_users': 'testuser=testpass',
        # With SSL
        'with_ssl': False,
        # With SchemaRegistry
        'with_sr': False,
        # Debug trivup
        'debug': False,
        # Cleanup
        'cleanup': True,
        'oidc': False,
        # Additional broker server.properties configuration
        # 'broker_conf': ['connections.max.idle.ms=1234', ..]
    }

    def __init__(self, **kwargs):
        """ Create and start a KafkaCluster.
            See default_conf above for parameters. """
        super(KafkaCluster, self).__init__()

        conf = kwargs
        self.conf = deepcopy(self.default_conf)
        if conf is not None:
            self.conf.update(conf)

        self.version = self.conf.get('version')
        self.version_num = [int(x) for x in self.version.split('.')][:3]
        self.kraft = self.conf.get('kraft')

        # Create trivup Cluster
        self.cluster = Cluster(self.__class__.__name__,
                               os.environ.get(
                                   'TRIVUP_ROOT',
                                   'tmp-%s' % self.__class__.__name__),
                               debug=bool(self.conf.get('debug', False)),
                               cleanup=bool(self.conf.get('cleanup', True)))

        self._client_conf = dict()
        self.env = dict()

        self.sasl_mechanism = self.conf.get('sasl_mechanism')

        # Add OIDC server app
        if bool(self.conf.get('oidc', False)):
            self.oidc = OauthbearerOIDCApp(self.cluster)
            if not self.sasl_mechanism:
                self.sasl_mechanism = 'OAUTHBEARER'
            elif self.sasl_mechanism.upper() != 'OAUTHBEARER':
                raise RuntimeError(
                    "OIDC requires sasl.mechanism OAUTHBEARER, not "
                    f"'{self.sasl_mechanism}'")
        else:
            self.oidc = None

        # Generate SSL certs if enabled
        if bool(self.conf.get('with_ssl')):
            self.ssl = SslApp(self.cluster, self.conf)
        else:
            self.ssl = None

        # Map mechanism and SSL to security protocol
        self.security_protocol = {
            (True, True): 'SASL_SSL',
            (True, False): 'SASL_PLAINTEXT',
            (False, True): 'SSL',
            (False, False): 'PLAINTEXT'
        }[(bool(self.sasl_mechanism), bool(self.ssl is not None))]

        if not self.kraft:
            # Create single ZK for the cluster (don't start yet)
            self.zk = ZookeeperApp(self.cluster)
        else:
            self.zk = None

        # Allocate (but don't use) a dummy appid so that the brokers get
        # the same appid/nodeid for both KRaft and ZK modes.
        Allocator(self.cluster).next()

        # Broker configuration
        broker_cnt = int(self.conf.get('broker_cnt'))

        self.broker_conf = {
            'replication_factor': min(3, broker_cnt),
            'num_partitions': 4,
            'version': self.version,
            'sasl_mechanisms': self.sasl_mechanism,
            'sasl_users': self.conf.get('sasl_users'),
            'conf': self.conf.get('broker_conf', []),
            'kafka_path': self.conf.get('kafka_path', None)
        }

        # Start Kerberos KDCs if GSSAPI (Kerberos) is configured
        if self.sasl_mechanism == 'GSSAPI':
            self._setup_kerberos()
            self.broker_conf['realm'] = self.broker_realm

        self.broker_conf['listener_host'] = 'localhost'

        # Create brokers (don't start yet)
        self.brokers = dict()
        for n in range(0, broker_cnt):
            bconf = copy.deepcopy(self.broker_conf)
            if self.version_num >= [2, 4, 0]:
                # Configure rack & replica selector if broker supports
                # fetch-from-follower
                bconf.update({
                    'conf': [
                        'broker.rack=RACK${appid}',
                        'replica.selector.class=org.apache.kafka.common.replica.RackAwareReplicaSelector'  # noqa: E501
                    ]
                })
            broker = KafkaBrokerApp(self.cluster, bconf)
            self.brokers[broker.appid] = broker

        # Generate bootstrap servers list
        all_listeners = (','.join(
            self.cluster.get_all('advertised_listeners', '',
                                 KafkaBrokerApp))).split(',')
        self.bootstrap_servers = ','.join(
            [x for x in all_listeners
             if x.startswith(self.security_protocol)])

        assert len(self.bootstrap_servers.split(',')) >= broker_cnt, \
            "{} < {} expected bootstrap servers".format(
                len(self.bootstrap_servers.split(',')), broker_cnt)

        # Create SchemaRegistry if enabled
        if bool(self.conf.get('with_sr', False)):
            self.sr = SchemaRegistryApp(
                self.cluster, {'version': self.conf.get('cp_version')})
            self.env['SR_URL'] = self.sr.get('url')

        # Create librdkafka client configuration
        self._setup_client_conf()

        # Deploy cluster
        self.cluster.deploy()

        # Start cluster
        self.start()

    def __del__(self):
        """ Destructor: forcibly stop the cluster """
        self.stop(force=True)

    def _setup_env(self):
        """ Set up convenience envs """
        self.env['KAFKA_PATH'] = self.cluster.find_app(KafkaBrokerApp).get(
            'destdir')
        if not self.kraft:
            self.env['ZK_ADDRESS'] = self.zk.get('address')
        self.env['BROKERS'] = self.bootstrap_servers
        self.env['KAFKA_VERSION'] = self.version
        self.env['TRIVUP_ROOT'] = self.cluster.instance_path()

        # Add each broker pid as an env so they can be killed individually.
        for b in self.cluster.find_apps(KafkaBrokerApp, 'started'):
            self.env['BROKER_PID_%d' % b.appid] = str(b.proc.pid)

    def _setup_client_conf(self):
        """ Set up librdkafka client configuration """
        self._client_conf['bootstrap.servers'] = self.bootstrap_servers
        self._client_conf['broker.address.family'] = 'v4'
        if self.security_protocol != 'PLAINTEXT':
            self._client_conf['security.protocol'] = self.security_protocol

        broker_version = self.conf.get('version')
        brver = broker_version.split('.')
        if int(brver[0]) == 0 and int(brver[1]) < 10:
            self._client_conf['broker.version.fallback'] = broker_version
            self._client_conf['api.version.request'] = 'false'

        # Client SASL configuration
        if self.sasl_mechanism:
            self._client_conf['sasl.mechanism'] = self.sasl_mechanism

            if self.sasl_mechanism == 'PLAIN' or \
               self.sasl_mechanism.find('SCRAM') != -1:
                # Use first user as SASL user/pass
                for up in self.conf.get('sasl_users', '').split(','):
                    u, p = up.split('=')
                    self._client_conf['sasl.username'] = u
                    self._client_conf['sasl.password'] = p
                    break

            elif self.sasl_mechanism == 'OAUTHBEARER':
                if self.oidc is not None:
                    self._client_conf['sasl.oauthbearer.method'] = 'OIDC'
                    self._client_conf[
                        'sasl.oauthbearer.token.endpoint.url'] = \
                        self.oidc.get('valid_url')
                    self._client_conf['sasl.oauthbearer.client.id'] = '123'
                    self._client_conf['sasl.oauthbearer.client.secret'] = \
                        'abc'
                    self._client_conf['sasl.oauthbearer.scope'] = 'test'
                    self._client_conf['sasl.oauthbearer.extensions'] = \
                        'ExtensionworkloadIdentity=develC348S,' \
                        'Extensioncluster=lkc123'
                else:
                    self._client_conf[
                        'enable.sasl.oauthbearer.unsecure.jwt'] = True
                    self._client_conf['sasl.oauthbearer.config'] = \
                        'scope=requiredScope principal=admin'

        # Client SSL configuration
        if self.ssl is not None:
            key = self.ssl.create_cert('client')
            self._client_conf['ssl.ca.location'] = self.ssl.ca['pem']
            self._client_conf['ssl.certificate.location'] = key['pub']['pem']
            self._client_conf['ssl.key.location'] = key['priv']['pem']
            self._client_conf['ssl.key.password'] = key['password']

            # Add envs pointing out locations of the generated certs
            for k, v in self.ssl.ca.items():
                self.env['SSL_ca_{}'.format(k)] = v

            # Set envs for all generated keys so tests can find them.
            for k, v in key.items():
                if type(v) is dict:
                    for k2, v2 in v.items():
                        # E.g. "SSL_priv_der=path/to/librdkafka-priv.der"
                        self.env['SSL_{}_{}'.format(k, k2)] = v2
                else:
                    self.env['SSL_{}'.format(k)] = v

    def _setup_kerberos(self):
        """ Set up Kerberos KDCs """

        # Create KDCs for each realm.
        # First realm will be the default / broker realm.
        #
        realm_cnt = int(self.conf.get('realm_cnt', 1))
        # No point in having more than two realms
        assert realm_cnt > 0 and realm_cnt < 3
        realms = ['REALM{}.TRIVUP'.format(x + 1)
                  for x in range(0, realm_cnt)]

        # Pre-allocate ports for the KDCs so they can reference each other
        # in the krb5.conf configuration.
        kdc_ports = {x: TcpPortAllocator(self.cluster).next("dummy")
                     for x in realms}

        # Set up realm=kdc:port cross-realm mappings
        cross_realms = ",".join(
            ["{}={}:{}".format(x, self.cluster.get_node().name, kdc_ports[x])
             for x in realms])

        kdcs = dict()
        for realm in realms:
            kdc = KerberosKdcApp(
                self.cluster, realm,
                conf={'port': kdc_ports[realm],
                      'cross_realms': cross_realms,
                      'renew_lifetime':
                          int(self.conf.get('krb_renew_lifetime')),
                      'ticket_lifetime':
                          int(self.conf.get('krb_ticket_lifetime'))})
            # Kerberos needs to be started prior to Kafka so that principals
            # and keytabs are available at the time of Kafka config
            # generation.
            kdc.start()
            kdcs[realm] = kdc

        self.broker_realm = realms[0]
        self.client_realm = realms[-1]
        self.broker_kdc = kdcs[self.broker_realm]
        self.client_kdc = kdcs[self.client_realm]

        # Add cross-realm TGTs
        if realm_cnt > 1:
            KerberosKdcApp.add_cross_realm_tgts(kdcs)

        # Add client envs and configuration
        self.env['KRB5CCNAME'] = self.client_kdc.mkpath('krb5cc')
        self.env['KRB5_CONFIG'] = self.client_kdc.conf['krb5_conf']
        self.env['KRB5_KDC_PROFILE'] = self.client_kdc.conf['kdc_conf']
        principal, keytab = self.client_kdc.add_principal('admin')
        self._client_conf['sasl.kerberos.keytab'] = keytab
        self._client_conf['sasl.kerberos.principal'] = \
            principal.split('@')[0]
        # Refresh ticket 60s before renew timeout.
        self._client_conf['sasl.kerberos.min.time.before.relogin'] = \
            max(1, int(self.conf.get('krb_renew_lifetime')) - 60) * 1000

    def start(self, timeout=0):
        """ Start cluster """
        self.cluster.start()

        # Set up additional convenience envs
        self._setup_env()

        if timeout > 0:
            self.wait_operational(timeout)

    def stop(self, cleanup=True, keeptypes=['log'], force=False, timeout=0):
        """ Stop cluster and clean up """
        self.cluster.stop(force=force)
        if timeout > 0:
            self.cluster.wait_stopped(timeout)
        if cleanup:
            self.cluster.cleanup(keeptypes)

    def stopped(self):
        """ Returns True when all components of the cluster are stopped """
        return len([x for x in self.cluster.apps
                    if x.status() == 'stopped']) == len(self.cluster.apps)

    def wait_operational(self, timeout=60):
        """ Wait for cluster to go operational """
        if not self.cluster.wait_operational(timeout):
            self.stop(force=True)
            raise Exception(
                "Cluster {} did not go operational, see logs in {}/{}".format(
                    self.cluster.name, self.cluster.root_path,
                    self.cluster.instance))

    def stop_broker(self, broker_id):
        """ Stop single broker """
        broker = self.brokers.get(broker_id, None)
        if broker is None:
            raise LookupError("Unknown broker id {}".format(broker_id))
        broker.stop()

    def start_broker(self, broker_id):
        """ Start single broker """
        broker = self.brokers.get(broker_id, None)
        if broker is None:
            raise LookupError("Unknown broker id {}".format(broker_id))
        broker.start()

    def interactive(self, cmd=None):
        """ Execute an interactive shell that has all the
            environment variables set. """

        if cmd is None:
            print('# Interactive mode')

        print("# - Waiting for cluster to go operational in {}/{}".format(
            self.cluster.root_path, self.cluster.instance))
        self.wait_operational()

        env = self.env.copy()

        # Avoids 'dumb' terminal mode
        env['TERM'] = os.environ.get('TERM', 'vt100')

        # Write librdkafka client configuration to file.
        cf_path = self.cluster.mkpath('rdkafka.conf', in_instance=True)
        self.write_client_conf(cf_path)
        env['RDKAFKA_TEST_CONF'] = cf_path
        print("# - Client configuration in {}".format(cf_path))
        print("# - Connect to cluster with bootstrap.servers {}".format(
            self.bootstrap_servers))

        # Prefix the standard prompt with cluster info.
        if cmd is None:
            pfx = '[TRIVUP:{}@{}] '.format(self.cluster.name, self.version)
            fullcmd = 'bash --rcfile <(cat ~/.bashrc; echo \'PS1="{}$PS1"\') -i'.format(pfx)  # noqa: E501
            print("# - You're now in an interactive sub-shell, type 'exit' "
                  "to stop the cluster and exit back to your shell.\n")
            retcode = subprocess.call(fullcmd, env=env, shell=True,
                                      executable='/bin/bash')

        else:
            fullcmd = cmd
            print("# - Executing: {}".format(fullcmd))
            retcode = subprocess.call(fullcmd, env=env, shell=True)

        if retcode != 0:
            print("# - Shell exited with returncode {}: {}".format(
                retcode, fullcmd))

    def client_conf(self):
        """ Get a dict copy of the client configuration """
        return deepcopy(self._client_conf)

    def write_client_conf(self, path, additional_blob=None):
        """ Write client configuration (librdkafka) to @param path """
        with open(path, "w") as f:
            for k, v in self._client_conf.items():
                f.write(str('%s=%s\n' % (k, v)))
            if additional_blob is not None:
                f.write(str('#\n# Additional configuration:\n'))
                f.write(str(additional_blob))
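

# A minimal usage sketch, not part of the class above: bring up a small
# SASL_SSL cluster, dump the generated librdkafka configuration, and shut it
# down. All parameter names come from default_conf and the public methods
# defined above; 'rdkafka.conf' is just an example output path.
if __name__ == '__main__':
    kc = KafkaCluster(broker_cnt=1,
                      sasl_mechanism='PLAIN',
                      with_ssl=True,
                      debug=True)
    try:
        # The constructor already deploys and starts the cluster;
        # wait for it to become operational before using it.
        kc.wait_operational(timeout=60)
        print('bootstrap.servers: %s' % kc.bootstrap_servers)
        kc.write_client_conf('rdkafka.conf')
        # kc.interactive() would instead drop into a sub-shell with BROKERS,
        # RDKAFKA_TEST_CONF, etc. exported.
    finally:
        kc.stop(cleanup=True)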