def _prime_env(self):
    output_topic = "identity_output2"
    self.start_redpanda_nodes(3)
    spec = TopicSpec(name="topic2", partition_count=3, replication_factor=3)
    self.client().create_topic(spec)
    self._deploy_identity_copro([spec.name], [output_topic])
    self.topic = spec.name
    self.start_producer(num_nodes=1, throughput=10000)
    self.start_consumer(1)
    self.await_startup(min_records=500)
    materialized_topic = construct_materialized_topic(
        spec.name, output_topic)

    def topic_created():
        metadata = self.client().describe_topics()
        self.logger.info(f"metadata: {metadata}")
        return any([x['topic'] == materialized_topic for x in metadata])

    wait_until(topic_created, timeout_sec=30, backoff_sec=2)
    self._start_mconsumer(materialized_topic)
    t, p = self._grab_input(spec.name)
    return {
        'topic': t,
        'partition': p,
        'materialized_topic': materialized_topic
    }
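
# For context, a minimal sketch of the naming helper used above. The real
# helper is imported from the test utilities; the exact format below is an
# assumption: coprocessor materialized topics combine the source topic name
# with the destination label, so a test can locate the output topic that
# corresponds to a given input.
def construct_materialized_topic(source, dest):
    # e.g. ("topic2", "identity_output2") -> "topic2.$identity_output2$"
    return f"{source}.${dest}$"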

def verify_filter_test(self):
    # 1. Fill source topics with test data
    self.push_test_data_to_inputs()

    # 2. Start coprocessor
    self._build_script(self._script)

    # 3. Drain from output topics within timeout
    materialized_topic = construct_materialized_topic(
        self.topic, self._output_topic)
    output_tps = [
        TopicPartition(materialized_topic, i)
        for i in range(self.topics[0].partition_count)
    ]
    consumer = NativeKafkaConsumer(self.redpanda.brokers(), output_tps,
                                   self._expected_record_cnt)

    # Wait until materialized topic is up
    def topic_created():
        topics = self._rpk_tool.list_topics()
        return materialized_topic in list(topics)

    wait_until(topic_created, timeout_sec=10, backoff_sec=1)

    # Consume from materialized topic
    def finished():
        self.logger.info("Recs read: %s" % consumer.results.num_records())
        return consumer.is_finished()

    consumer.start()
    wait_until(finished, timeout_sec=10, backoff_sec=1)
    consumer.join()

    # Assert success
    assert consumer.results.num_records() == self._expected_record_cnt
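
# The bare wait_until calls above raise a generic TimeoutError on failure.
# A hedged sketch of the same drain wait using ducktape's err_msg keyword so
# a timeout reports context; wiring it in is a suggestion, not the original
# test's code, and the helper name here is hypothetical.
from ducktape.utils.util import wait_until


def drain_with_context(consumer, expected):
    wait_until(lambda: consumer.results.num_records() >= expected,
               timeout_sec=10,
               backoff_sec=1,
               err_msg="consumer failed to drain the materialized topic")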

def wasm_test_plan(self):
    input_topic = self.topics[0].name
    mapped_topic = "myoutputtopic"
    output_topic = construct_materialized_topic(input_topic, mapped_topic)
    # The identity transform produces one identical record onto a topic for
    # each input record. The result should be a 1-to-1 mapping between a
    # source and destination topic; they should be identical when compared.
    basic_script = WasmScript(
        inputs=[(input_topic, (self._num_records, self._record_size))],
        outputs=[(output_topic, self._num_records)],
        script=WasmTemplateRepository.IDENTITY_TRANSFORM)
    return [basic_script]
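
# A sketch of the 1-to-1 property the comment above describes: for the
# identity transform, the destination topic should end up with exactly as
# many records as were produced to the source. The result-set parameters and
# this helper's name are hypothetical, for illustration only.
def assert_identity_mapping(input_results, output_results, num_records):
    # Both sides of the mapping must carry the full record count.
    assert input_results.num_records() == num_records
    assert output_results.num_records() == num_records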

def to_output_topic_spec(output_topics):
    """
    Create a list of TopicSpecs for the set of output topics.
    Must parse the materialized topic for the input topic to determine
    the number of partitions.
    """
    result = []
    for src, _, _ in topic_spec:
        materialized_topics = [
            TopicSpec(name=construct_materialized_topic(src.name, dest),
                      partition_count=src.partition_count,
                      replication_factor=src.replication_factor,
                      cleanup_policy=src.cleanup_policy)
            for dest in output_topics
        ]
        result += materialized_topics
    return result
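
# Illustrative usage of the helper above. topic_spec is a free variable,
# presumably bound in the enclosing scope as a list of
# (TopicSpec, producer_opts, consumer_opts) tuples; the binding below and
# the "source.$dest$" names assume the naming sketched earlier. All names
# here are hypothetical.
topic_spec = [(TopicSpec(name="input",
                         partition_count=3,
                         replication_factor=3), None, None)]
specs = to_output_topic_spec(["a", "b"])
# -> two materialized specs mirroring the source's partitioning:
#    "input.$a$" and "input.$b$", each with partition_count=3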

def verify_materialized_topics_test(self):
    """
    Entry point for all tests. Asynchronously we perform the following:

    1. Scripts are built & deployed
    2. Consumers are set up listening for expected records on output topics
    3. Producers are set up and begin producing onto input topics
    4. When finished, perform assertions in this method
    """
    self.start(self.wasm_test_input(), self.wasm_test_plan())
    input_results, output_results = self.wait_on_results()
    for script in self.wasm_test_outputs():
        for dest in script:
            outputs = set([
                construct_materialized_topic(src.name, dest)
                for src, _, _ in self.wasm_test_input()
            ])
            tresults = output_results.filter(lambda x: x.topic in outputs)
            if not self.verify_results()(input_results, tresults):
                raise Exception(
                    f"Set {dest} results weren't as expected: {type(self).__name__}"
                )
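
# A minimal sketch of what the verify_results() factory used above could
# return for the identity case: a comparator over the input results and the
# filtered output results. The real comparators may also compare record
# contents or checksums; this count-based check is an assumption, not the
# repo's implementation.
def verify_results(self):
    def compare(input_results, output_results):
        return input_results.num_records() == output_results.num_records()

    return compare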