def test_lifecycle(self, security_protocol, new_consumer=True, sasl_mechanism='GSSAPI'):
    """Check that console consumer starts/stops properly, and that we are capturing log output."""

    self.kafka.security_protocol = security_protocol
    self.kafka.client_sasl_mechanism = sasl_mechanism
    self.kafka.interbroker_sasl_mechanism = sasl_mechanism
    self.kafka.start()

    self.consumer.security_protocol = security_protocol
    self.consumer.new_consumer = new_consumer

    t0 = time.time()
    self.consumer.start()
    node = self.consumer.nodes[0]

    wait_until(lambda: self.consumer.alive(node),
               timeout_sec=10, backoff_sec=.2, err_msg="Consumer was too slow to start")
    self.logger.info("consumer started in %s seconds " % str(time.time() - t0))

    # Verify that log output is happening
    wait_until(lambda: file_exists(node, ConsoleConsumer.LOG_FILE), timeout_sec=10,
               err_msg="Timed out waiting for consumer log file to exist.")
    wait_until(lambda: line_count(node, ConsoleConsumer.LOG_FILE) > 0, timeout_sec=1,
               backoff_sec=.25, err_msg="Timed out waiting for log entries to start.")

    # Verify no consumed messages
    assert line_count(node, ConsoleConsumer.STDOUT_CAPTURE) == 0

    self.consumer.stop_node(node)

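# Hedged sketch: in ducktape, variants of a test like the one above are normally generated with
# the matrix / parametrize marks rather than called directly. The decorator names are real
# ducktape APIs; the particular value sets below are illustrative assumptions only.
from ducktape.mark import matrix, parametrize

@matrix(security_protocol=['PLAINTEXT', 'SSL'])                              # hypothetical values
@matrix(security_protocol=['SASL_SSL'], sasl_mechanism=['GSSAPI', 'PLAIN'])  # hypothetical values
@parametrize(security_protocol='PLAINTEXT', new_consumer=False)              # hypothetical values
def test_lifecycle(self, security_protocol, new_consumer=True, sasl_mechanism='GSSAPI'):
    ...  # body as above
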
def test_lifecycle(self, security_protocol, sasl_mechanism='GSSAPI'):
    """Check that console consumer starts/stops properly, and that we are capturing log output."""

    self.kafka.security_protocol = security_protocol
    self.kafka.client_sasl_mechanism = sasl_mechanism
    self.kafka.interbroker_sasl_mechanism = sasl_mechanism
    self.kafka.start()

    self.consumer.security_protocol = security_protocol

    t0 = time.time()
    self.consumer.start()
    node = self.consumer.nodes[0]

    wait_until(lambda: self.consumer.alive(node),
               timeout_sec=20, backoff_sec=.2, err_msg="Consumer was too slow to start")
    self.logger.info("consumer started in %s seconds " % str(time.time() - t0))

    # Verify that log output is happening
    wait_until(lambda: file_exists(node, ConsoleConsumer.LOG_FILE), timeout_sec=10,
               err_msg="Timed out waiting for consumer log file to exist.")
    wait_until(lambda: line_count(node, ConsoleConsumer.LOG_FILE) > 0, timeout_sec=1,
               backoff_sec=.25, err_msg="Timed out waiting for log entries to start.")

    # Verify no consumed messages
    assert line_count(node, ConsoleConsumer.STDOUT_CAPTURE) == 0

    self.consumer.stop_node(node)

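# Hedged illustration: the security knobs set above ultimately surface as standard Kafka client
# properties in the consumer's rendered config file. Assuming SASL_SSL with GSSAPI (the values
# here are examples, not the test's actual rendered output), the relevant entries look like:
#
#   security.protocol=SASL_SSL
#   sasl.mechanism=GSSAPI
#   sasl.kerberos.service.name=kafka
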
def test_lifecycle(self, security_protocol, new_consumer=True):
    """Check that console consumer starts/stops properly, and that we are capturing log output."""

    self.kafka.security_protocol = security_protocol
    self.kafka.start()

    self.consumer.security_protocol = security_protocol
    self.consumer.new_consumer = new_consumer

    t0 = time.time()
    self.consumer.start()
    node = self.consumer.nodes[0]

    wait_until(lambda: self.consumer.alive(node),
               timeout_sec=10, backoff_sec=.2, err_msg="Consumer was too slow to start")
    self.logger.info("consumer started in %s seconds " % str(time.time() - t0))

    # Verify that log output is happening
    wait_until(lambda: file_exists(node, ConsoleConsumer.LOG_FILE), timeout_sec=10,
               err_msg="Timed out waiting for logging to start.")
    assert line_count(node, ConsoleConsumer.LOG_FILE) > 0

    # Verify no consumed messages
    assert line_count(node, ConsoleConsumer.STDOUT_CAPTURE) == 0

    self.consumer.stop_node(node)

def test_lifecycle(self):
    t0 = time.time()
    self.consumer.start()
    node = self.consumer.nodes[0]

    wait_until(lambda: self.consumer.alive(node),
               timeout_sec=10, backoff_sec=.2, err_msg="Consumer was too slow to start")
    self.logger.info("consumer started in %s seconds " % str(time.time() - t0))

    # Verify that log output is happening
    wait_until(lambda: file_exists(node, ConsoleConsumer.LOG_FILE), timeout_sec=10,
               err_msg="Timed out waiting for logging to start.")
    assert line_count(node, ConsoleConsumer.LOG_FILE) > 0

    # Verify no consumed messages
    assert line_count(node, ConsoleConsumer.STDOUT_CAPTURE) == 0

    self.consumer.stop_node(node)

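# All of the lifecycle variants above lean on ducktape's wait_until helper for their timing
# checks. As a mental model, a minimal, behavior-compatible sketch is given below; the real
# implementation lives in ducktape, so treat this simplified version as an assumption for
# illustration only.
import time

def wait_until(condition, timeout_sec, backoff_sec=.1, err_msg=""):
    """Poll `condition` until it returns a truthy value, sleeping `backoff_sec` between tries.

    Raises TimeoutError carrying `err_msg` if `timeout_sec` elapses first.
    """
    deadline = time.time() + timeout_sec
    while time.time() < deadline:
        if condition():
            return
        time.sleep(backoff_sec)
    raise TimeoutError(err_msg)
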
def test_lifecycle(self, security_protocol, new_consumer):
    self.kafka = KafkaService(self.test_context, num_nodes=1, zk=self.zk,
                              security_protocol=security_protocol,
                              topics={self.topic: {"partitions": 1, "replication-factor": 1}})
    self.kafka.start()

    t0 = time.time()
    self.consumer = ConsoleConsumer(self.test_context, num_nodes=1, kafka=self.kafka,
                                    topic=self.topic, security_protocol=security_protocol,
                                    new_consumer=new_consumer)
    self.consumer.start()
    node = self.consumer.nodes[0]

    wait_until(lambda: self.consumer.alive(node),
               timeout_sec=10, backoff_sec=.2, err_msg="Consumer was too slow to start")
    self.logger.info("consumer started in %s seconds " % str(time.time() - t0))

    # Verify that log output is happening
    wait_until(lambda: file_exists(node, ConsoleConsumer.LOG_FILE), timeout_sec=10,
               err_msg="Timed out waiting for logging to start.")
    assert line_count(node, ConsoleConsumer.LOG_FILE) > 0

    # Verify no consumed messages
    assert line_count(node, ConsoleConsumer.STDOUT_CAPTURE) == 0

    self.consumer.stop_node(node)

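# The log/stdout assertions above depend on two small remote-file helpers (imported in the
# real tests from the kafkatest utilities). The bodies below are a hedged sketch of their
# likely shape, built only on ducktape's generic node.account SSH API; treat them as
# assumptions, not the exact upstream code.
def file_exists(node, file):
    """Quick and dirty check for the existence of a remote file."""
    try:
        node.account.ssh("cat %s" % file, allow_fail=False)
        return True
    except Exception:
        return False

def line_count(node, file):
    """Return the number of lines in a remote file, via wc -l."""
    out = [line for line in node.account.ssh_capture("wc -l %s" % file)]
    if len(out) != 1:
        raise Exception("Expected single line of output from wc -l")
    return int(out[0].strip().split(" ")[0])
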
def _worker(self, idx, node):
    node.account.ssh("mkdir -p %s" % VerifiableProducer.PERSISTENT_ROOT, allow_fail=False)

    # Create and upload log properties
    log_config = self.render('tools_log4j.properties', log_file=VerifiableProducer.LOG_FILE)
    node.account.create_file(VerifiableProducer.LOG4J_CONFIG, log_config)

    # Configure security
    self.security_config = self.kafka.security_config.client_config(node=node)
    self.security_config.setup_node(node)

    # Create and upload config file
    producer_prop_file = self.prop_file(node)
    if self.acks is not None:
        self.logger.info("VerifiableProducer (index = %d) will use acks = %s", idx, self.acks)
        producer_prop_file += "\nacks=%s\n" % self.acks
    self.logger.info("verifiable_producer.properties:")
    self.logger.info(producer_prop_file)
    node.account.create_file(VerifiableProducer.CONFIG_FILE, producer_prop_file)

    cmd = self.start_cmd(node, idx)
    self.logger.debug("VerifiableProducer %d command: %s" % (idx, cmd))

    self.produced_count[idx] = 0
    last_produced_time = time.time()
    prev_msg = None
    node.account.ssh(cmd)

    # Ensure that STDOUT_CAPTURE exists before trying to read from it.
    # Note that if max_messages is configured, it's possible for the process to exit before this
    # wait_until condition is checked.
    start = time.time()
    wait_until(lambda: node.account.isfile(VerifiableProducer.STDOUT_CAPTURE) and
                       line_count(node, VerifiableProducer.STDOUT_CAPTURE) > 0,
               timeout_sec=10,
               err_msg="%s: VerifiableProducer took too long to start" % node.account)
    self.logger.debug("%s: VerifiableProducer took %s seconds to start" %
                      (node.account, time.time() - start))

    with node.account.open(VerifiableProducer.STDOUT_CAPTURE, 'r') as f:
        while True:
            line = f.readline()
            if line == '' and not self.alive(node):
                # The process is gone, and we've reached the end of the output file, so we don't
                # expect any more output to appear in the STDOUT_CAPTURE file
                break

            line = line.strip()
            data = self.try_parse_json(line)
            if data is not None:
                with self.lock:
                    if data["name"] == "producer_send_error":
                        data["node"] = idx
                        self.not_acked_values.append(self.message_validator(data["value"]))
                        self.produced_count[idx] += 1
                    elif data["name"] == "producer_send_success":
                        self.acked_values.append(self.message_validator(data["value"]))
                        self.produced_count[idx] += 1

                        # Log information if there is a large gap between successively
                        # acknowledged messages
                        t = time.time()
                        time_delta_sec = t - last_produced_time
                        if time_delta_sec > 2 and prev_msg is not None:
                            self.logger.debug(
                                "Time delta between successively acked messages is large: " +
                                "delta_t_sec: %s, prev_message: %s, current_message: %s" %
                                (str(time_delta_sec), str(prev_msg), str(data)))

                        last_produced_time = t
                        prev_msg = data
                    elif data["name"] == "shutdown_complete":
                        if node in self.clean_shutdown_nodes:
                            raise Exception(
                                "Unexpected shutdown event from producer, already shutdown. "
                                "Producer index: %d" % idx)
                        self.clean_shutdown_nodes.add(node)

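# _worker treats each stdout line as a JSON event and relies on try_parse_json returning None
# for anything unparseable, so non-JSON output is silently skipped. A minimal sketch consistent
# with that contract (an assumption, not necessarily the exact upstream method, which is
# defined on VerifiableProducer itself):
import json

def try_parse_json(string):
    """Try to parse `string` as JSON; return None if it is not parseable."""
    try:
        return json.loads(string)
    except ValueError:
        return None
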