def test_host_network(self): # Test from within the container self.is_kafka_healthy_for_service("kafka-sasl-ssl-1", 19094, 3, "sasl.kafka.com", "SASL_SSL") producer_env = {'KAFKA_ZOOKEEPER_CONNECT': "sasl.kafka.com:22181,sasl.kafka.com:32181,sasl.kafka.com:42181/saslssl", 'KAFKA_OPTS': "-Djava.security.auth.login.config=/etc/kafka/secrets/host_producer_jaas.conf -Djava.security.krb5.conf=/etc/kafka/secrets/host_krb.conf -Dsun.net.spi.nameservice.provider.1=sun -Dsun.security.krb5.debug=true"} producer_logs = utils.run_docker_command( 300, image="confluentinc/cp-kafka", name="kafka-ssl-sasl-host-producer", environment=producer_env, command=PRODUCER.format(brokers="sasl.kafka.com:29094", topic="foo", config="host.producer.ssl.sasl.config", messages=100), host_config={'NetworkMode': 'host', 'Binds': ['/tmp/kafka-cluster-host-test/secrets:/etc/kafka/secrets']}) self.assertTrue("PRODUCED 100 messages" in producer_logs) consumer_env = {'KAFKA_ZOOKEEPER_CONNECT': "sasl.kafka.com:22181,sasl.kafka.com:32181,sasl.kafka.com:42181/saslssl", 'KAFKA_OPTS': "-Djava.security.auth.login.config=/etc/kafka/secrets/host_consumer_jaas.conf -Djava.security.krb5.conf=/etc/kafka/secrets/host_krb.conf -Dsun.net.spi.nameservice.provider.1=sun -Dsun.security.krb5.debug=true"} consumer_logs = utils.run_docker_command( 300, image="confluentinc/cp-kafka", name="kafka-ssl-sasl-host-consumer", environment=consumer_env, command=CONSUMER.format(brokers="sasl.kafka.com:29094", topic="foo", config="host.consumer.ssl.sasl.config", messages=10), host_config={'NetworkMode': 'host', 'Binds': ['/tmp/kafka-cluster-host-test/secrets:/etc/kafka/secrets']}) self.assertTrue("Processed a total of 10 messages" in consumer_logs)
def create_connector(name, create_command, host, port):
    utils.run_docker_command(
        image="confluentinc/cp-kafka-connect",
        command=create_command,
        host_config={'NetworkMode': 'host'})

    status = None
    for i in xrange(25):
        source_logs = utils.run_docker_command(
            image="confluentinc/cp-kafka-connect",
            command=CONNECTOR_STATUS.format(host=host, port=port, name=name),
            host_config={'NetworkMode': 'host'})
        connector = json.loads(source_logs)
        # Retry on an error response; Connect may still be creating the connector.
        if "error_code" in connector:
            time.sleep(1)
        else:
            status = connector["connector"]["state"]
            if status in ("FAILED", "RUNNING"):
                return status
            elif status == "UNASSIGNED":
                time.sleep(1)
    # Give up after 25 attempts and report the last state seen (None if the
    # status endpoint never returned successfully).
    return status
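# Hedged usage sketch for create_connector(). The connector name, Connect REST
# port, and source_create_cmd below are illustrative assumptions, not fixtures
# defined in this suite. The helper returns the last state it observed
# ("RUNNING", "FAILED", "UNASSIGNED", or None if Connect kept returning
# errors), so callers should assert on the result:
#
#     status = create_connector("example-source", source_create_cmd,
#                               host="localhost", port=28082)
#     assert status == "RUNNING"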
def test_host_network(self): # Test from within the container self.is_kafka_healthy_for_service("kafka-ssl-1", 19093, 3, "localhost", "SSL") # Test from outside the container logs = utils.run_docker_command( image="confluentinc/cp-kafkacat", command=KAFKA_SSL_CHECK.format(host="localhost", port=19093), host_config={'NetworkMode': 'host', 'Binds': ['/tmp/kafka-cluster-host-test/secrets:/etc/kafka/secrets']}) parsed_logs = json.loads(logs) self.assertEquals(3, len(parsed_logs["brokers"])) expected_brokers = [{"id": 1, "name": "localhost:19093"}, {"id": 2, "name": "localhost:29093"}, {"id": 3, "name": "localhost:39093"}] self.assertEquals(sorted(expected_brokers), sorted(parsed_logs["brokers"])) producer_logs = utils.run_docker_command( 300, image="confluentinc/cp-kafka", name="kafka-ssl-host-producer", environment={'KAFKA_ZOOKEEPER_CONNECT': "localhost:22181,localhost:32181,localhost:42181/ssl"}, command=PRODUCER.format(brokers="localhost:29093", topic="foo", config="host.producer.ssl.config", messages=100), host_config={'NetworkMode': 'host', 'Binds': ['/tmp/kafka-cluster-host-test/secrets:/etc/kafka/secrets']}) self.assertTrue("PRODUCED 100 messages" in producer_logs) consumer_logs = utils.run_docker_command( 300, image="confluentinc/cp-kafkacat", name="kafkacat-ssl-host-consumer", command=KAFKACAT_SSL_CONSUMER.format(brokers="localhost:29093", topic="foo", messages=10), host_config={'NetworkMode': 'host', 'Binds': ['/tmp/kafka-cluster-host-test/secrets:/etc/kafka/secrets']}) self.assertEquals("\n".join([str(i + 1) for i in xrange(10)]), consumer_logs.strip())
def test_host_network(self): # Test from within the container self.is_kafka_rest_healthy_for_service("kafka-rest-host") # Test from outside the container logs = utils.run_docker_command(image="confluentinc/cp-kafka-rest", command=HEALTH_CHECK.format( host="localhost", port=8082), host_config={'NetworkMode': 'host'}) self.assertTrue("PASS" in logs) # Test writing a topic and confirm it was written by checking for it logs_2 = utils.run_docker_command(image="confluentinc/cp-kafka-rest", command=POST_TO_TOPIC_CHECK % ("localhost", 8082, "testtopichost"), host_config={'NetworkMode': 'host'}) self.assertTrue("value_schema_id" in logs_2) logs_3 = utils.run_docker_command(image="confluentinc/cp-kafka-rest", command=GET_TOPICS_CHECK.format( host="localhost", port=8082), host_config={'NetworkMode': 'host'}) self.assertTrue("testtopichost" in logs_3)
def create_file_source_test_data(host_dir, file, num_records):
    volumes = ["%s:/tmp/test" % host_dir]
    print "VOLUMES : ", volumes
    utils.run_docker_command(
        image="confluentinc/cp-kafka-connect",
        command="bash -c 'rm -rf /tmp/test/*.txt && seq {count} > /tmp/test/{name}'".format(count=num_records, name=file),
        host_config={'NetworkMode': 'host', 'Binds': volumes})
def test_bridged_network(self): # Test from within the container self.is_c3_healthy_for_service("control-center-bridge", "standalone-network-test_zk") INTERCEPTOR_CLIENTS_CMD = """bash -xc '\ export TOPIC="{topic}" \ export MESSAGES="{messages}" \ export CHECK_MESSAGES="{check_messages}" cub kafka-ready 1 40 -z "$ZOOKEEPER_CONNECT" \ && control-center-run-class kafka.admin.TopicCommand --create --topic "$TOPIC" --partitions 1 --replication-factor 1 --zookeeper "$ZOOKEEPER_CONNECT" \ && echo "interceptor.classes=io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor" > /tmp/producer.config \ && echo "acks=all" >> /tmp/producer.config \ && seq "$MESSAGES" | control-center-run-class kafka.tools.ConsoleProducer --broker-list "$BOOTSTRAP_SERVERS" --topic "$TOPIC" --producer.config /tmp/producer.config \ && echo PRODUCED "$MESSAGES" messages. \ && echo "interceptor.classes=io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor" > /tmp/consumer.config \ && control-center-run-class kafka.tools.ConsoleConsumer --bootstrap-server "$BOOTSTRAP_SERVERS" --topic "$TOPIC" --new-consumer --from-beginning --max-messages "$CHECK_MESSAGES" --consumer.config /tmp/consumer.config' """ MESSAGES = 10000 TOPIC = 'test-topic' # Run producer and consumer with interceptor to generate monitoring data out = utils.run_docker_command( image="confluentinc/cp-control-center", # TODO - check that all messages are read back when this bug is fixed - https://issues.apache.org/jira/browse/KAFKA-3993 command=INTERCEPTOR_CLIENTS_CMD.format(topic=TOPIC, messages=MESSAGES, check_messages=MESSAGES // 2), host_config={'NetworkMode': 'standalone-network-test_zk'}, environment={'ZOOKEEPER_CONNECT': 'zookeeper-bridge:2181', 'BOOTSTRAP_SERVERS': 'kafka-bridge:19092'} ) self.assertTrue("PRODUCED %s messages" % MESSAGES in out) # Check that data was processed # Calculate last hour and next hour in case we cross the border now_unix = int(time.time()) prev_hr_start_unix = now_unix - now_unix % 3600 next_hr_start_unix = prev_hr_start_unix + 2 * 3600 FETCH_MONITORING_DATA_CMD = """curl -s -H 'Content-Type: application/json' 'http://{host}:{port}/1.0/monitoring/consumer_groups?startTimeMs={start}&stopTimeMs={stop}&rollup=ONE_HOUR'""" cmd = FETCH_MONITORING_DATA_CMD.format(host="control-center-bridge", port=9021, start=prev_hr_start_unix * 1000, stop=next_hr_start_unix * 1000) fetch_cmd_args = { 'image': "confluentinc/cp-control-center", 'command': cmd, 'host_config': {'NetworkMode': 'standalone-network-test_zk'}, } attempts = 0 while attempts <= 60: attempts += 1 out = json.loads(utils.run_docker_command(**fetch_cmd_args)) if 'error_code' in out and out['error_code'] == 404: time.sleep(5) continue else: self.assertTrue('sources' in out) self.assertEquals(1, len(out['sources'])) self.assertEquals(TOPIC, out['sources'][0]['topic']) break
def is_c3_healthy_for_service(cls, service):
    output = utils.run_docker_command(
        600,
        image=IMAGE_NAME,
        command=C3_CHECK.format(host=service, port=9021),
        host_config={'NetworkMode': 'config-test_default'})
    assert "PASS" in output
def test_jmx_bridged_network(self): # Test from outside the container logs = utils.run_docker_command( image="confluentinc/cp-jmxterm", command=JMX_CHECK.format(jmx_hostname="kafka-bridged-jmx", jmx_port="9999"), host_config={'NetworkMode': 'standalone-network-test_zk'}) self.assertTrue("Version = 0.10.2.0-cp1;" in logs)
def test_jmx_host_network(self): # Test from outside the container logs = utils.run_docker_command( image="confluentinc/cp-jmxterm", command=JMX_CHECK.format(jmx_hostname="localhost", jmx_port="39999"), host_config={'NetworkMode': 'host'}) self.assertTrue("Version = 0.10.2.0-cp1;" in logs)
def is_c3_healthy_for_service(cls, service, network):
    output = utils.run_docker_command(
        600,
        image="confluentinc/cp-control-center",
        command=C3_CHECK.format(host=service, port=9021),
        host_config={'NetworkMode': network})
    assert "PASS" in output
def test_host_network(self): # Test from within the container self.is_zk_healthy_for_service("host-network", 32181) # Test from outside the container logs = utils.run_docker_command(image="confluentinc/cp-zookeeper", command=HEALTH_CHECK.format( port=32181, host="localhost"), host_config={'NetworkMode': 'host'}) self.assertTrue("PASS" in logs)
def test_kafka_rest_commands(self):
    expected = "org.I0Itec.zkclient.exception.ZkTimeoutException: Unable to connect to zookeeper server 'localhost:2181' with timeout of 30000 ms"
    self.assertTrue(expected in utils.run_docker_command(
        image=self.image,
        command="kafka-rest-start",
        environment={
            "KAFKA_REST_ZOOKEEPER_CONNECT": "nothing",
            "KAFKA_REST_HOST_NAME": "yolo"
        }))
def test_schema_registry_commands(self):
    expected = "USAGE: /usr/bin/schema-registry-start [-daemon] schema-registry.properties"
    self.assertTrue(expected in utils.run_docker_command(
        image=self.image,
        command="schema-registry-start",
        environment={
            "SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL": "who cares",
            "SCHEMA_REGISTRY_HOST_NAME": "just anything really"
        }))
def test_host_network(self): # Test from within the container self.is_schema_registry_healthy_for_service("schema-registry-host") # Test from outside the container logs = utils.run_docker_command( image="confluentinc/cp-schema-registry", command=HEALTH_CHECK.format(host="localhost", port=8081), host_config={'NetworkMode': 'host'}) self.assertTrue("PASS" in logs)
def test_jmx_bridged_network(self): # Test from outside the container logs = utils.run_docker_command( image="confluentinc/cp-jmxterm", command=JMX_CHECK.format(jmx_hostname="bridge-network-jmx", jmx_port="9999"), host_config={'NetworkMode': 'standalone-network-test_zk'}) self.assertTrue( "Version = 3.4.6-1569965, built on 02/20/2014 09:09 GMT;" in logs)
def test_jmx_host_network(self): # Test from outside the container logs = utils.run_docker_command(image="confluentinc/cp-jmxterm", command=JMX_CHECK.format( client_port=52181, jmx_hostname="localhost", jmx_port="39999"), host_config={'NetworkMode': 'host'}) self.assertTrue( "Version = 3.4.8--1, built on 02/06/2016 03:18 GMT;" in logs)
def test_jmx_bridged_network(self): self.is_kafka_rest_healthy_for_service("kafka-rest-bridged-jmx") # Test from outside the container logs = utils.run_docker_command( image="confluentinc/cp-jmxterm", command=JMX_CHECK.format(jmx_hostname="kafka-rest-bridged-jmx", jmx_port=9999), host_config={'NetworkMode': 'standalone-network-test_zk'}) self.assertTrue("connections-active =" in logs)
def test_jmx_bridged_network(self): # Test from outside the container logs = utils.run_docker_command( image="confluentinc/cp-jmxterm", command=JMX_CHECK.format(client_port=2181, jmx_hostname="bridge-network-jmx", jmx_port="9999"), host_config={'NetworkMode': 'standalone-network-test_zk'}) self.assertTrue( "Version = 3.4.9-1757313, built on 08/23/2016 06:50 GMT;" in logs)
def test_jmx_host_network(self): self.is_kafka_rest_healthy_for_service("kafka-rest-host-jmx", 28082) # Test from outside the container logs = utils.run_docker_command(image="confluentinc/cp-jmxterm", command=JMX_CHECK.format( jmx_hostname="localhost", jmx_port=39999), host_config={'NetworkMode': 'host'}) self.assertTrue("connections-active =" in logs)
def test_bridged_network(self): # Test from within the container self.is_schema_registry_healthy_for_service("schema-registry-bridge", 18081) # Test from outside the container on host network logs = utils.run_docker_command( image="confluentinc/cp-schema-registry", command=HEALTH_CHECK.format(host="localhost", port=18081), host_config={'NetworkMode': 'host'}) self.assertTrue("PASS" in logs) # Test from outside the container on bridge network logs_2 = utils.run_docker_command( image="confluentinc/cp-schema-registry", command=HEALTH_CHECK.format(host="schema-registry-bridge", port=18081), host_config={'NetworkMode': 'standalone-network-test_zk'}) self.assertTrue("PASS" in logs_2)
def test_bridge_network(self): # Test from within the container self.is_kafka_healthy_for_service("kafka-sasl-ssl-1", 9094, 3, "kafka-sasl-ssl-1", "SASL_SSL") # FIXME: Figure out how to use kafkacat with SASL/Kerberos # Test from outside the container # logs = utils.run_docker_command( # image="confluentinc/cp-kafkacat", # name="bridged-kafkacat", # command=KAFKA_SASL_SSL_CHECK.format(host="kafka-sasl-ssl-1", port=9094, broker_principal="kafka", client_principal="bridged_kafkacat", client_host="bridged-kafkacat"), # host_config={'NetworkMode': 'cluster-test_zk', 'Binds': ['/tmp/kafka-cluster-bridge-test/secrets:/etc/kafka/secrets', '/tmp/kafka-cluster-bridge-test/secrets/bridged_krb.conf:/etc/krb5.conf']}) # # parsed_logs = json.loads(logs) # self.assertEquals(3, len(parsed_logs["brokers"])) # expected_brokers = [{"id": 1, "name": "kafka-sasl-ssl-1:9094"}, {"id": 2, "name": "kafka-sasl-ssl-2:9094"}, {"id": 3, "name": "kafka-sasl-ssl-3:9094"}] # self.assertEquals(sorted(expected_brokers), sorted(parsed_logs["brokers"])) producer_env = {'KAFKA_ZOOKEEPER_CONNECT': "zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181/saslssl", 'KAFKA_OPTS': "-Djava.security.auth.login.config=/etc/kafka/secrets/bridged_producer_jaas.conf -Djava.security.krb5.conf=/etc/kafka/secrets/bridged_krb.conf -Dsun.net.spi.nameservice.provider.1=sun -Dsun.security.krb5.debug=true"} producer_logs = utils.run_docker_command( 300, image="confluentinc/cp-kafka", name="kafka-sasl-ssl-bridged-producer", environment=producer_env, command=PRODUCER.format(brokers="kafka-sasl-ssl-1:9094", topic="foo", config="bridged.producer.ssl.sasl.config", messages=100), host_config={'NetworkMode': 'cluster-test_zk', 'Binds': ['/tmp/kafka-cluster-bridge-test/secrets:/etc/kafka/secrets']}) self.assertTrue("PRODUCED 100 messages" in producer_logs) consumer_env = {'KAFKA_ZOOKEEPER_CONNECT': "zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181/saslssl", 'KAFKA_OPTS': "-Djava.security.auth.login.config=/etc/kafka/secrets/bridged_consumer_jaas.conf -Djava.security.krb5.conf=/etc/kafka/secrets/bridged_krb.conf -Dsun.net.spi.nameservice.provider.1=sun -Dsun.security.krb5.debug=true"} consumer_logs = utils.run_docker_command( 300, image="confluentinc/cp-kafka", name="kafka-sasl-ssl-bridged-consumer", environment=consumer_env, command=CONSUMER.format(brokers="kafka-sasl-ssl-1:9094", topic="foo", config="bridged.consumer.ssl.sasl.config", messages=10), host_config={'NetworkMode': 'cluster-test_zk', 'Binds': ['/tmp/kafka-cluster-bridge-test/secrets:/etc/kafka/secrets']}) self.assertTrue("Processed a total of 10 messages" in consumer_logs)
def test_host_network(self): # Test from within the container self.is_kafka_healthy_for_service("kafka-host", 29092, 1) # Test from outside the container logs = utils.run_docker_command(image="confluentinc/cp-kafkacat", command=KAFKA_CHECK.format( host="localhost", port=29092), host_config={'NetworkMode': 'host'}) parsed_logs = json.loads(logs) self.assertEquals(1, len(parsed_logs["brokers"])) self.assertEquals(1, parsed_logs["brokers"][0]["id"]) self.assertEquals("localhost:29092", parsed_logs["brokers"][0]["name"])
def test_host_network(self): # Test from within the container self.is_kafka_healthy_for_service("kafka-1", 19092, 3) # Test from outside the container logs = utils.run_docker_command(image="confluentinc/cp-kafkacat", command=KAFKA_CHECK.format( host="localhost", port=19092), host_config={'NetworkMode': 'host'}) parsed_logs = json.loads(logs) self.assertEquals(3, len(parsed_logs["brokers"])) expected_brokers = [{ "id": 1, "name": "localhost:19092" }, { "id": 2, "name": "localhost:29092" }, { "id": 3, "name": "localhost:39092" }] self.assertEquals(sorted(expected_brokers), sorted(parsed_logs["brokers"])) client_logs = utils.run_docker_command( 300, image="confluentinc/cp-kafka", name="kafka-producer", environment={ 'KAFKA_ZOOKEEPER_CONNECT': "localhost:22181,localhost:32181,localhost:42181" }, command=PLAIN_CLIENTS.format(brokers="localhost:19092", topic="foo", messages=100), host_config={'NetworkMode': 'host'}) self.assertTrue("Processed a total of 100 messages" in client_logs)
def test_host_network(self):
    # Verify access to the containerized application from inside its own container
    self.is_kafka_music_app_healthy_for_service("kafka-streams-examples-host", port=37070)

    # Verify outside access to the containerized application from a new container
    list_running_app_instances_url = "http://{host}:{port}/kafka-music/instances".format(host="localhost", port=37070)
    logs = utils.run_docker_command(
        image="confluentinc/cp-kafka-streams-examples",
        command=KAFKA_MUSIC_APP_HEALTH_CHECK.format(url=list_running_app_instances_url),
        host_config={'NetworkMode': 'host'})
    self.assertTrue("PASS" in logs)
def test_zookeeper_on_service(self):
    self.is_zk_healthy_for_service("zookeeper-1", 22182)
    self.is_zk_healthy_for_service("zookeeper-1", 32182)
    self.is_zk_healthy_for_service("zookeeper-1", 42182)

    # A three-node ensemble should report exactly one leader and two followers.
    client_ports = [22182, 32182, 42182]
    expected = sorted(["Mode: follower\n", "Mode: follower\n", "Mode: leader\n"])
    outputs = []
    for port in client_ports:
        output = utils.run_docker_command(
            image="confluentinc/cp-zookeeper",
            command=MODE_COMMAND.format(port=port),
            host_config={'NetworkMode': 'host'})
        outputs.append(output)
    self.assertEquals(sorted(outputs), expected)
def wait_and_get_sink_output(host_dir, file, expected_num_records):
    # Poll the file sink's output file until the expected number of records
    # appears, giving up after 60 polls at 10-second intervals.
    volumes = ["%s/:/tmp/test" % host_dir]
    for i in xrange(60):
        # The bash command prints -1 if the file does not exist yet;
        # otherwise it prints the number of lines in the file.
        sink_record_count = utils.run_docker_command(
            image="confluentinc/cp-kafka-connect",
            command="bash -c '[ -e /tmp/test/%s ] && (wc -l /tmp/test/%s | cut -d\" \" -f1) || echo -1'" % (file, file),
            host_config={'NetworkMode': 'host', 'Binds': volumes})
        if int(sink_record_count.strip()) == expected_num_records:
            break
        time.sleep(10)
    return int(sink_record_count.strip())
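# Hedged end-to-end sketch tying the Connect helpers above together. The
# directories, file names, connector names, and REST port are illustrative
# assumptions, not fixtures defined in this suite; a real test supplies its
# own connector-creation commands.
def example_file_connector_roundtrip(source_create_cmd, sink_create_cmd):
    # Seed 1000 records for a file source connector to pick up.
    create_file_source_test_data("/tmp/file-source-test", "input.txt", 1000)
    # Create the source and sink connectors and wait for them to start.
    assert create_connector("example-source", source_create_cmd, "localhost", 28082) == "RUNNING"
    assert create_connector("example-sink", sink_create_cmd, "localhost", 28082) == "RUNNING"
    # The sink should eventually mirror all 1000 records to its output file.
    assert wait_and_get_sink_output("/tmp/file-sink-test", "output.txt", 1000) == 1000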
def test_default_config(self):
    self.is_kafka_healthy_for_service("default-config", 9092, 1)
    props = self.cluster.run_command_on_service(
        "default-config",
        "bash -c 'cat /etc/kafka/kafka.properties | sort'")
    expected = """
        advertised.listeners=PLAINTEXT://default-config:9092
        listeners=PLAINTEXT://0.0.0.0:9092
        log.dirs=/var/lib/kafka/data
        zookeeper.connect=zookeeper:2181/defaultconfig
        """
    self.assertEquals(props.translate(None, string.whitespace), expected.translate(None, string.whitespace))

    logs = utils.run_docker_command(
        image="confluentinc/cp-kafkacat",
        command=KAFKA_CHECK.format(host="default-config", port=9092),
        host_config={'NetworkMode': 'config-test_default'})
    parsed_logs = json.loads(logs)
    # With no broker.id set, Kafka auto-generates ids starting at
    # reserved.broker.max.id + 1 (1001 by default).
    expected_brokers = [{"id": 1001, "name": "default-config:9092"}]
    self.assertEquals(sorted(expected_brokers), sorted(parsed_logs["brokers"]))
def test_adb(self):
    self.is_kafka_healthy_for_service("kafka-1", 19092, 3)

    # Create a topic and generate load so the rebalance has data to move.
    topic = "test-adb-metrics"
    topic_output = self.cluster.run_command_on_service(
        "kafka-1",
        TOPIC_CREATE.format(topic=topic, partitions=10, replicas=3))
    assert 'Created topic "%s".' % topic in topic_output

    record_count = 100000
    produce_data_output = self.cluster.run_command_on_service(
        "kafka-1",
        GENERATE_PERF_DATA.format(brokers="localhost:19092", topic=topic, record_count=record_count, record_size_bytes=1000, throughput_rps=100000))
    assert "%s records sent" % record_count in produce_data_output

    proposed_assignment_output = self.cluster.run_command_on_service(
        "kafka-1",
        ADB_PROPOSED_ASSIGNMENT.format(brokers="localhost:19092"))
    assert "version" in proposed_assignment_output

    # Start a rebalance that removes broker 1 from the cluster.
    removed_broker = 1
    execute_logs = utils.run_docker_command(
        300,  # Timeout = 5 mins
        image="confluentinc/cp-enterprise-kafka",
        name="adb-execute",
        environment={'KAFKA_ZOOKEEPER_CONNECT': "localhost:22181,localhost:32181,localhost:42181"},
        command=ADB_EXECUTE.format(brokers="localhost:19092", throttle_bps=100000000, remove_broker=removed_broker),
        host_config={'NetworkMode': 'host'})
    assert "Rebalance started, its status can be checked via the status command." in execute_logs

    rebalance_status = self.cluster.run_command_on_service("kafka-1", ADB_STATUS)

    # Wait up to two minutes for the rebalance to finish.
    rebalance_complete = ""
    for i in xrange(120):
        rebalance_complete = self.cluster.run_command_on_service("kafka-1", ADB_FINISH)
        if "The rebalance has completed and throttling has been disabled" in rebalance_complete:
            break
        time.sleep(1)
    assert "The rebalance has completed and throttling has been disabled" in rebalance_complete

    rebalance_status = self.cluster.run_command_on_service("kafka-1", ADB_STATUS)
    assert "No rebalance is currently in progress" in rebalance_status

    # Verify that the removed broker holds no partition replicas.
    logs = utils.run_docker_command(
        image="confluentinc/cp-kafkacat",
        command=KAFKA_CHECK.format(host="localhost", port=19092),
        host_config={'NetworkMode': 'host'})
    parsed_logs = json.loads(logs)
    topics = parsed_logs["topics"]
    for t in topics:
        for p in t["partitions"]:
            for r in p["replicas"]:
                assert r["id"] != removed_broker
def test_zk_commands(self):
    expected = "USAGE: /usr/bin/zookeeper-server-start [-daemon] zookeeper.properties"
    self.assertTrue(expected in utils.run_docker_command(
        image=self.image,
        command="zookeeper-server-start"))
def test_c3_commands(self):
    expected = "control-center-start: ERROR: Properties file is required"
    self.assertTrue(expected in utils.run_docker_command(
        image=self.image,
        command="control-center-start"))