def _create_config(self):
    try:
        self._config = Config.get_instance()
    except ConfigNotExistsException:
        click.echo(f"No config provided in {config_dir()}")
        if ensure_approval(f"Should a sample file be created in {config_dir()}"):
            config_dir().mkdir(exist_ok=True)
            copyfile(sample_config_path().as_posix(), config_path())
        else:
            raise
        if ensure_approval("Do you want to modify the config file now?"):
            click.edit(filename=config_path().as_posix())
        self._config = Config.get_instance()
def __init__(self):
    self.no_verify = False
    try:
        self.config = Config()
    except ConfigNotExistsException:
        click.echo(f"No config provided in {config_dir()}")
        config_dir().mkdir(exist_ok=True)
        if ensure_approval(f"Should a sample file be created in {config_dir()}"):
            copyfile(sample_config_path().as_posix(), config_path())
        if ensure_approval("Do you want to modify the config file now?"):
            click.edit(filename=config_path().as_posix())
        sys.exit(0)
    self._cluster = None
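# The snippets in this section all gate their actions behind ensure_approval. The
# helper itself is not shown here; as a point of reference, a minimal sketch of what
# it presumably looks like (an assumption, not the verbatim esque implementation) is
# a thin wrapper around click.confirm that is short-circuited by the --no-verify flag:
import click


def ensure_approval(question: str, no_verify: bool = False) -> bool:
    # When --no-verify was passed, skip the interactive prompt and assume consent.
    if no_verify:
        return True
    return click.confirm(question)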
def create_topic(state: State, topic_name: str):
    if not ensure_approval("Are you sure?", no_verify=state.no_verify):
        click.echo("Aborted")
        return
    topic_controller = state.cluster.topic_controller
    topic_controller.create_topics([Topic(topic_name)])
def create_topic(
    state: State,
    topic_name: str,
    template_topic: str,
    partitions: Optional[int],
    replication_factor: Optional[int],
):
    """Create a topic.

    Create a topic called TOPIC_NAME with the option of providing a template topic,
    <template_topic>, from which all the configuration options will be copied.
    If both <template_topic> and any of the <partitions> or <replication-factor>
    options are given, then <partitions> or <replication-factor> take precedence
    over the corresponding attributes of <template_topic>.
    """
    topic_controller = state.cluster.topic_controller
    if topic_controller.topic_exists(topic_name):
        raise ValidationException(f"Topic {topic_name!r} already exists.")
    if template_topic:
        topic = topic_from_template(template_topic, partitions, replication_factor, topic_controller, topic_name)
    else:
        topic = topic_with_defaults(partitions, replication_factor, state, topic_name)
    if not ensure_approval(
        f"Create topic {blue_bold(topic.name)} "
        + f"with replication factor {blue_bold(str(topic.replication_factor))} "
        + f"and {blue_bold(str(topic.num_partitions))} partition"
        + ("s" if topic.num_partitions != 1 else "")
        + f" in context {blue_bold(state.config.current_context)}?",
        no_verify=state.no_verify,
    ):
        click.echo(click.style("Aborted!", bg="red"))
        return
    topic_controller.create_topics([topic])
    click.echo(click.style(f"Topic '{topic.name}' successfully created.", fg="green"))
def delete_consumergroup(state: State, consumergroup_id: Tuple[str]):
    """Delete consumer groups."""
    consumer_groups = list(consumergroup_id) + get_piped_stdin_arguments()
    consumergroup_controller: ConsumerGroupController = ConsumerGroupController(state.cluster)
    current_consumergroups = consumergroup_controller.list_consumer_groups()
    existing_consumer_groups: List[str] = []
    for group in consumer_groups:
        if group in current_consumergroups:
            click.echo(f"Deleting {click.style(group, fg='green')}")
            existing_consumer_groups.append(group)
        else:
            click.echo(f"Skipping {click.style(group, fg='yellow')} — does not exist")
    if not existing_consumer_groups:
        click.echo(click.style("The provided list contains no existing consumer groups.", fg="red"))
    else:
        if ensure_approval("Are you sure?", no_verify=state.no_verify):
            consumergroup_controller.delete_consumer_groups(existing_consumer_groups)
            current_consumergroups = consumergroup_controller.list_consumer_groups()
            assert all(consumer_group not in current_consumergroups for consumer_group in existing_consumer_groups)
            click.echo(
                click.style(f"Consumer groups '{existing_consumer_groups}' successfully deleted.", fg="green")
            )
def delete_topics(state: State, topic_list: Tuple[str]):
    """Delete multiple topics.

    WARNING: This command cannot be undone, and all data in the topics will be lost.
    """
    topic_names = list(topic_list) + get_piped_stdin_arguments()
    topic_controller = state.cluster.topic_controller
    current_topics = [topic.name for topic in topic_controller.list_topics(get_topic_objects=False)]
    existing_topics: List[str] = []
    for topic in topic_names:
        if topic in current_topics:
            click.echo(f"Deleting {click.style(topic, fg='green')}")
            existing_topics.append(topic)
        else:
            click.echo(f"Skipping {click.style(topic, fg='yellow')} — does not exist")
    if not existing_topics:
        click.echo(click.style("The provided list contains no existing topics.", fg="red"))
    else:
        if ensure_approval("Are you sure?", no_verify=state.no_verify):
            topic_controller.delete_topics([Topic(topic_name) for topic_name in existing_topics])
            click.echo(click.style(f"Topics '{existing_topics}' successfully deleted.", fg="green"))
def esque(state, recreate_config: bool):
    if recreate_config:
        config_dir().mkdir(exist_ok=True)
        if ensure_approval(f"Should the current config in {config_dir()} get replaced?", no_verify=state.no_verify):
            copyfile(sample_config_path().as_posix(), config_path())
def transfer(
    state: State,
    topic: str,
    from_context: str,
    to_context: str,
    numbers: int,
    last: bool,
    avro: bool,
    keep_file: bool,
):
    current_timestamp_milliseconds = int(round(time.time() * 1000))
    unique_name = topic + "_" + str(current_timestamp_milliseconds)
    group_id = "group_for_" + unique_name
    directory_name = "message_" + unique_name
    base_dir = Path(directory_name)
    state.config.context_switch(from_context)

    with HandleFileOnFinished(base_dir, keep_file) as working_dir:
        number_consumed_messages = _consume_to_file(working_dir, topic, group_id, from_context, numbers, avro, last)

        if number_consumed_messages == 0:
            click.echo(click.style("Execution stopped because no messages were consumed.", fg="red"))
            click.echo(bold("Possible reasons: The topic is empty or the starting offset was set too high."))
            return

        click.echo("\nReady to produce to context " + blue_bold(to_context) + " and target topic " + blue_bold(topic))

        if not ensure_approval("Do you want to proceed?\n", no_verify=state.no_verify):
            return

        state.config.context_switch(to_context)
        _produce_from_file(topic, to_context, working_dir, avro)
def delete_topic(state: State, topic_name: str):
    topic_controller = state.cluster.topic_controller
    if ensure_approval("Are you sure?", no_verify=state.no_verify):
        topic_controller.delete_topic(Topic(topic_name))
        assert topic_name not in (t.name for t in topic_controller.list_topics())
def config_recreate(state: State):
    """(Re)create esque config.

    Overwrites the existing esque config file with the sample config. If no esque
    config file already exists, create one with the sample config.
    """
    config_dir().mkdir(exist_ok=True)
    if ensure_approval(f"Should the current config in {config_dir()} get replaced?", no_verify=state.no_verify):
        copyfile(sample_config_path().as_posix(), config_path())
def set_offsets(
    state: State,
    consumer_id: str,
    topic_name: str,
    offset_to_value: int,
    offset_by_delta: int,
    offset_to_timestamp: str,
    offset_from_group: str,
):
    """Set consumer group offsets.

    Change or set the offset of a consumer group for a topic, i.e. the message number
    the consumer group will read next. This can be done by specifying an explicit offset
    (--offset-to-value), a delta to shift the current offset forwards or backwards
    (--offset-by-delta), a timestamp, in which case the offset of the first message on
    or after that timestamp is taken (--offset-to-timestamp), or a group from which to
    copy the offsets (--offset-from-group). If the consumer group reads from more than
    one topic, a regular expression can be given to select which topic's offsets to
    change. NOTE: the default is to change the offsets for all topics.
    """
    logger = logging.getLogger(__name__)
    consumergroup_controller = ConsumerGroupController(state.cluster)
    offset_plan = consumergroup_controller.create_consumer_group_offset_change_plan(
        consumer_id=consumer_id,
        topic_name=topic_name if topic_name else ".*",
        offset_to_value=offset_to_value,
        offset_by_delta=offset_by_delta,
        offset_to_timestamp=offset_to_timestamp,
        offset_from_group=offset_from_group,
    )

    if offset_plan:
        click.echo(green_bold("Proposed offset changes: "))
        offset_plan.sort(key=attrgetter("topic_name", "partition_id"))
        for topic_name, group in groupby(offset_plan, attrgetter("topic_name")):
            group = list(group)
            max_proposed = max(len(str(elem.proposed_offset)) for elem in group)
            max_current = max(len(str(elem.current_offset)) for elem in group)
            for plan_element in group:
                new_offset = str(plan_element.proposed_offset).rjust(max_proposed)
                format_args = dict(
                    topic_name=plan_element.topic_name,
                    partition_id=plan_element.partition_id,
                    current_offset=plan_element.current_offset,
                    new_offset=new_offset if plan_element.offset_equal else red_bold(new_offset),
                    max_current=max_current,
                )
                click.echo(
                    "Topic: {topic_name}, partition {partition_id:2}, "
                    "current offset: {current_offset:{max_current}}, "
                    "new offset: {new_offset}".format(**format_args)
                )
        if ensure_approval("Are you sure?", no_verify=state.no_verify):
            consumergroup_controller.edit_consumer_group_offsets(consumer_id=consumer_id, offset_plan=offset_plan)
    else:
        logger.info("No changes proposed.")
def delete_topic(state: State, topic_name: str):
    """Delete a topic.

    WARNING: This command cannot be undone, and all data in the topic will be lost.
    """
    topic_controller = state.cluster.topic_controller
    if ensure_approval("Are you sure?", no_verify=state.no_verify):
        topic_controller.delete_topic(Topic(topic_name))
        assert topic_name not in (t.name for t in topic_controller.list_topics(get_topic_objects=False))
        click.echo(click.style(f"Topic with name '{topic_name}' successfully deleted.", fg="green"))
def create_consumergroup(state: State, consumergroup_id: str, topics: str):
    """Create a consumer group for several topics using the format <topic_name>[partition]=offset.

    Both [partition] and offset are optional; the default offset is 0. If no partition
    is given, the consumer group is assigned to all partitions of the topic.
    """
    pattern = re.compile(r"(?P<topic_name>[\w.-]+)(?:\[(?P<partition>\d+)\])?(?:=(?P<offset>\d+))?")
    topic_controller = state.cluster.topic_controller
    clean_topics: List[TopicPartition] = []
    msg = ""
    for topic in topics:
        match = pattern.match(topic)
        if not match:
            raise ValidationException("Topic name should be present")
        topic = match.group("topic_name")
        partition_match = match.group("partition")
        offset_match = match.group("offset")
        offset = int(offset_match) if offset_match else 0
        if not partition_match:
            topic_config = topic_controller.get_cluster_topic(topic)
            watermarks = topic_config.watermarks
            for part, wm in watermarks.items():
                # Fall back to offset 0 when the requested offset lies beyond the partition's high watermark.
                offset = offset if wm.high >= offset else 0
                clean_topics.append(TopicPartition(topic=topic, partition=part, offset=offset))
                msg += f"{topic}[{part}]={offset}\n"
        else:
            partition = int(partition_match)
            clean_topics.append(TopicPartition(topic=topic, partition=partition, offset=offset))
            msg += f"{topic}[{partition}]={offset}\n"
    if not ensure_approval(
        f"This will create the consumer group '{consumergroup_id}' with initial offsets:\n" + msg + "\nAre you sure?",
        no_verify=state.no_verify,
    ):
        click.echo(click.style("Aborted!", bg="red"))
        return
    consumergroup_controller: ConsumerGroupController = ConsumerGroupController(state.cluster)
    created_consumergroup: ConsumerGroup = consumergroup_controller.create_consumer_group(
        consumergroup_id, offsets=clean_topics
    )
    click.echo(click.style(f"Consumer group '{created_consumergroup.id}' was successfully created", fg="green"))
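# For illustration only (not part of esque): the specifier format accepted by the
# pattern in create_consumergroup above, exercised against a couple of sample inputs.
import re

pattern = re.compile(r"(?P<topic_name>[\w.-]+)(?:\[(?P<partition>\d+)\])?(?:=(?P<offset>\d+))?")

full = pattern.match("my.topic[3]=42")
assert full.group("topic_name") == "my.topic"
assert full.group("partition") == "3"
assert full.group("offset") == "42"

# Partition and offset are both optional; missing groups come back as None, which
# the command above turns into "all partitions" and offset 0 respectively.
bare = pattern.match("my.topic")
assert bare.group("partition") is None and bare.group("offset") is None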
def edit_topic(state: State, topic_name: str):
    controller = state.cluster.topic_controller
    topic = state.cluster.topic_controller.get_cluster_topic(topic_name)

    new_conf = click.edit(topic.to_yaml(only_editable=True), extension=".yml")

    # edit process can be aborted, e.g. in vim via :q!
    if new_conf is None:
        click.echo("Change aborted")
        return

    topic.from_yaml(new_conf)
    diff = pretty_topic_diffs({topic_name: controller.diff_with_cluster(topic)})
    click.echo(diff)

    if ensure_approval("Are you sure?"):
        controller.alter_configs([topic])
def edit_offsets(state: State, consumer_id: str, topic_name: str):
    """Edit consumer group offsets.

    Open the offsets of the consumer group in the default editor. If the user
    saves upon exiting the editor, all the offsets will be set to the given values.
    """
    logger = logging.getLogger(__name__)
    consumergroup_controller = ConsumerGroupController(state.cluster)
    consumer_group_state, offset_plans = consumergroup_controller.read_current_consumer_group_offsets(
        consumer_id=consumer_id, topic_name_expression=topic_name if topic_name else ".*"
    )
    if consumer_group_state != "Empty":
        logger.error(
            "Consumergroup {} is not empty. Setting offsets is only allowed for empty consumer groups.".format(
                consumer_id
            )
        )
    sorted_offset_plan = list(offset_plans.values())
    sorted_offset_plan.sort(key=attrgetter("topic_name", "partition_id"))
    offset_plan_as_yaml = {
        "offsets": [
            {"topic": element.topic_name, "partition": element.partition_id, "offset": element.current_offset}
            for element in sorted_offset_plan
        ]
    }
    _, new_conf = edit_yaml(str(offset_plan_as_yaml), validator=validation.validate_offset_config)

    for new_offset in new_conf["offsets"]:
        plan_key: str = f"{new_offset['topic']}::{new_offset['partition']}"
        if plan_key in offset_plans:
            final_value, error, message = ConsumerGroupController.select_new_offset_for_consumer(
                requested_offset=new_offset["offset"], offset_plan=offset_plans[plan_key]
            )
            if error:
                logger.error(message)
            offset_plans[plan_key].proposed_offset = final_value

    if offset_plans:
        click.echo(green_bold("Proposed offset changes: "))
        pretty_offset_plan(list(offset_plans.values()))
        if ensure_approval("Are you sure?", no_verify=state.no_verify):
            consumergroup_controller.edit_consumer_group_offsets(
                consumer_id=consumer_id, offset_plan=list(offset_plans.values())
            )
    else:
        logger.info("No changes proposed.")
def create_topic(state: State, topic_name: str, like: str):
    """Create a topic.

    Create a topic called TOPIC_NAME with the option of providing a template
    topic via <like>, from which all the configuration options will be copied.
    """
    if not ensure_approval("Are you sure?", no_verify=state.no_verify):
        click.echo("Aborted!")
        return
    topic_controller = state.cluster.topic_controller
    if like:
        template_config = topic_controller.get_cluster_topic(like)
        topic = Topic(
            topic_name, template_config.num_partitions, template_config.replication_factor, template_config.config
        )
    else:
        topic = Topic(topic_name)
    topic_controller.create_topics([topic])
    click.echo(click.style(f"Topic with name '{topic.name}' successfully created.", fg="green"))
def delete_topic(state: State, topic_name: str):
    """Delete a single topic.

    WARNING: This command cannot be undone, and all data in the topic will be lost.
    """
    topic_controller = state.cluster.topic_controller
    current_topics = [topic.name for topic in topic_controller.list_topics(get_topic_objects=False)]
    if topic_name not in current_topics:
        click.echo(click.style(f"Topic [{topic_name}] doesn't exist on the cluster.", fg="red"))
    else:
        click.echo(f"Deleting {click.style(topic_name, fg='green')}")
        if ensure_approval("Are you sure?", no_verify=state.no_verify):
            topic_controller.delete_topics([Topic(topic_name)])
            click.echo(click.style(f"Topic '{topic_name}' successfully deleted.", fg="green"))
def edit_topic(state: State, topic_name: str):
    """Edit a topic.

    Open the topic's configuration in the default editor. If the user saves upon
    exiting the editor, all the given changes will be applied to the topic.
    """
    controller = state.cluster.topic_controller
    topic = state.cluster.topic_controller.get_cluster_topic(topic_name)

    _, new_conf = edit_yaml(topic.to_yaml(only_editable=True), validator=validation.validate_editable_topic_config)

    local_topic = copy_to_local(topic)
    local_topic.update_from_dict(new_conf)
    diff = controller.diff_with_cluster(local_topic)
    if not diff.has_changes:
        click.echo("Nothing changed.")
        return

    click.echo(pretty_topic_diffs({topic_name: diff}))
    if ensure_approval("Are you sure?"):
        controller.alter_configs([local_topic])
    else:
        click.echo("Canceled!")
def set_offsets(
    state: State,
    consumer_id: str,
    topic_name: str,
    offset_to_value: int,
    offset_by_delta: int,
    offset_to_timestamp: str,
    offset_from_group: str,
):
    """Set consumer group offsets.

    Change or set the offset of a consumer group for a topic, i.e. the message number
    the consumer group will read next. This can be done by specifying an explicit offset
    (--offset-to-value), a delta to shift the current offset forwards or backwards
    (--offset-by-delta), a timestamp, in which case the offset of the first message on
    or after that timestamp is taken (--offset-to-timestamp), or a group from which to
    copy the offsets (--offset-from-group). If the consumer group reads from more than
    one topic, a regular expression can be given to select which topic's offsets to
    change. NOTE: the default is to change the offsets for all topics.
    """
    logger = logging.getLogger(__name__)
    consumergroup_controller = ConsumerGroupController(state.cluster)
    offset_plan = consumergroup_controller.create_consumer_group_offset_change_plan(
        consumer_id=consumer_id,
        topic_name=topic_name if topic_name else ".*",
        offset_to_value=offset_to_value,
        offset_by_delta=offset_by_delta,
        offset_to_timestamp=offset_to_timestamp,
        offset_from_group=offset_from_group,
    )

    if offset_plan:
        click.echo(green_bold("Proposed offset changes: "))
        pretty_offset_plan(offset_plan)
        if ensure_approval("Are you sure?", no_verify=state.no_verify):
            consumergroup_controller.edit_consumer_group_offsets(consumer_id=consumer_id, offset_plan=offset_plan)
    else:
        logger.info("No changes proposed.")
def transfer(
    state: State,
    from_topic: str,
    to_topic: str,
    from_context: str,
    to_context: str,
    number: int,
    last: bool,
    avro: bool,
    binary: bool,
    consumergroup: str,
    match: str = None,
):
    """Transfer messages between two topics.

    Read messages from the source topic in the source context and write them into
    the destination topic in the destination context. This function is shorthand for
    using a combination of `esque consume` and `esque produce`.

    \b
    EXAMPLES:
    # Transfer the first 10 messages from TOPIC1 in the current context to TOPIC2 in context DSTCTX.
    esque transfer --first -n 10 --from-topic TOPIC1 --to-topic TOPIC2 --to-context DSTCTX

    \b
    # Transfer the first 10 messages from TOPIC1 in the context SRCCTX to TOPIC2 in context DSTCTX,
    # assuming the messages are AVRO.
    esque transfer --first -n 10 --avro --from-topic TOPIC1 --from-context SRCCTX --to-topic TOPIC2 --to-context DSTCTX
    """
    if not from_context:
        from_context = state.config.current_context
    state.config.context_switch(from_context)

    if binary and avro:
        raise ValueError("Cannot set data to be interpreted as binary AND avro.")

    if not to_context:
        to_context = from_context

    if from_context == to_context and from_topic == to_topic:
        raise ValueError("Cannot transfer data to the same topic.")

    topic_controller = Cluster().topic_controller
    if not topic_controller.topic_exists(to_topic):
        if ensure_approval(f"Topic {to_topic!r} does not exist, do you want to create it?", no_verify=state.no_verify):
            topic_controller.create_topics([Topic(to_topic)])
        else:
            click.echo(click.style("Aborted!", bg="red"))
            return

    builder = PipelineBuilder()

    input_message_serializer = create_input_serializer(avro, binary, state)
    builder.with_input_message_serializer(input_message_serializer)

    input_handler = create_input_handler(consumergroup, from_context, from_topic)
    builder.with_input_handler(input_handler)

    output_message_serializer = create_output_serializer(avro, binary, to_topic, state)
    builder.with_output_message_serializer(output_message_serializer)

    output_handler = create_output_handler(to_context, to_topic)
    builder.with_output_handler(output_handler)

    if last:
        start = KafkaHandler.OFFSET_AFTER_LAST_MESSAGE
    else:
        start = KafkaHandler.OFFSET_AT_FIRST_MESSAGE

    builder.with_range(start=start, limit=number)

    if match:
        builder.with_stream_decorator(yield_only_matching_messages(match))

    counter, counter_decorator = event_counter()
    builder.with_stream_decorator(counter_decorator)

    pipeline = builder.build()
    pipeline.run_pipeline()

    click.echo(
        green_bold(str(counter.message_count))
        + " messages consumed from topic "
        + blue_bold(from_topic)
        + " in context "
        + blue_bold(from_context)
        + " and produced to topic "
        + blue_bold(to_topic)
        + " in context "
        + blue_bold(to_context)
        + "."
    )
def produce(
    state: State,
    topic: str,
    to_context: str,
    directory: str,
    avro: bool,
    match: str = None,
    read_from_stdin: bool = False,
    ignore_stdin_errors: bool = False,
):
    """Produce messages to a topic.

    Write messages to a given topic in a given context. These messages can come from
    either a directory <directory> containing files corresponding to the different
    partitions or from STDIN.

    \b
    EXAMPLES:
    # Write all messages from the files in <directory> to TOPIC in the <destination_ctx> context.
    esque produce -d <directory> -t <destination_ctx> TOPIC

    \b
    # Start environment in terminal to write messages to TOPIC in the <destination_ctx> context.
    esque produce --stdin -f <destination_ctx> -y TOPIC

    \b
    # Copy source_topic to destination_topic.
    esque consume -f first-context --stdout source_topic | esque produce -t second-context --stdin destination_topic
    """
    if directory is None and not read_from_stdin:
        raise ValueError("You have to provide a directory or use the --stdin flag.")

    if directory is not None:
        input_directory = Path(directory)
        if not input_directory.exists():
            raise ValueError(f"Directory {directory} does not exist!")

    if not to_context:
        to_context = state.config.current_context
    state.config.context_switch(to_context)

    topic_controller = state.cluster.topic_controller
    if topic not in map(attrgetter("name"), topic_controller.list_topics(get_topic_objects=False)):
        click.echo(f"Topic {blue_bold(topic)} does not exist in context {blue_bold(to_context)}.")
        if ensure_approval("Would you like to create it now?"):
            topic_controller.create_topics([Topic(topic)])
        else:
            raise TopicDoesNotExistException(f"Topic {topic} does not exist!", -1)

    stdin = click.get_text_stream("stdin")
    if read_from_stdin and isatty(stdin):
        click.echo(
            "Type the messages to produce, "
            + ("in JSON format, " if not ignore_stdin_errors else "")
            + blue_bold("one per line")
            + ". End with "
            + blue_bold("CTRL+D")
            + "."
        )
    elif read_from_stdin and not isatty(stdin):
        click.echo(f"Reading messages from an external source, {blue_bold('one per line')}.")
    else:
        click.echo(
            f"Producing from directory {blue_bold(str(directory))} to topic {blue_bold(topic)}"
            f" in target context {blue_bold(to_context)}"
        )
    producer = ProducerFactory().create_producer(
        topic_name=topic,
        input_directory=input_directory if not read_from_stdin else None,
        avro=avro,
        match=match,
        ignore_stdin_errors=ignore_stdin_errors,
    )
    total_number_of_messages_produced = producer.produce()
    click.echo(
        green_bold(str(total_number_of_messages_produced))
        + " messages successfully produced to topic "
        + blue_bold(topic)
        + " in context "
        + blue_bold(to_context)
        + "."
    )
def apply(state: State, file: str):
    """Apply a set of topic configurations.

    Create new topics and apply changes to existing topics, as specified in the
    config yaml file <file>.
    """
    # Get topic data based on the YAML
    with open(file) as config_file:
        yaml_topic_configs = yaml.safe_load(config_file).get("topics")
    yaml_topics = [Topic.from_dict(conf) for conf in yaml_topic_configs]
    yaml_topic_names = [t.name for t in yaml_topics]
    if not len(yaml_topic_names) == len(set(yaml_topic_names)):
        raise ValidationException("Duplicate topic names in the YAML!")

    # Get topic data based on the cluster state
    topic_controller = state.cluster.topic_controller
    cluster_topics = topic_controller.list_topics(search_string="|".join(yaml_topic_names))
    cluster_topic_names = [t.name for t in cluster_topics]

    # Calculate changes
    to_create = [yaml_topic for yaml_topic in yaml_topics if yaml_topic.name not in cluster_topic_names]
    to_edit = [
        yaml_topic
        for yaml_topic in yaml_topics
        if yaml_topic not in to_create and topic_controller.diff_with_cluster(yaml_topic).has_changes
    ]
    to_edit_diffs = {t.name: topic_controller.diff_with_cluster(t) for t in to_edit}
    to_ignore = [yaml_topic for yaml_topic in yaml_topics if yaml_topic not in to_create and yaml_topic not in to_edit]

    # Sanity check - the 3 groups of topics should be complete and have no overlap
    assert (
        set(to_create).isdisjoint(set(to_edit))
        and set(to_create).isdisjoint(set(to_ignore))
        and set(to_edit).isdisjoint(set(to_ignore))
        and len(to_create) + len(to_edit) + len(to_ignore) == len(yaml_topics)
    )

    # Print diffs so the user can check
    click.echo(pretty_unchanged_topic_configs(to_ignore))
    click.echo(pretty_new_topic_configs(to_create))
    click.echo(pretty_topic_diffs(to_edit_diffs))

    # Check for actionable changes
    if len(to_edit) + len(to_create) == 0:
        click.echo("No changes detected, aborting!")
        return

    # Warn users & abort when replication & num_partition changes are attempted
    if any(not diff.is_valid for _, diff in to_edit_diffs.items()):
        click.echo(
            "Changes to `replication_factor` and `num_partitions` can not be applied on already existing topics."
        )
        click.echo("Cancelling due to invalid changes")
        return

    # Get approval
    if not ensure_approval("Apply changes?", no_verify=state.no_verify):
        click.echo("Cancelling changes")
        return

    # Apply changes
    topic_controller.create_topics(to_create)
    topic_controller.alter_configs(to_edit)

    # Output confirmation
    changes = {"unchanged": len(to_ignore), "created": len(to_create), "changed": len(to_edit)}
    click.echo(click.style(pretty({"Successfully applied changes": changes}), fg="green"))
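# Hypothetical example of the yaml layout "apply" consumes. The exact keys accepted
# by Topic.from_dict are an assumption here, inferred from the num_partitions,
# replication_factor, and config attributes used in the other snippets.
import yaml

sample = yaml.safe_load(
    """
topics:
  - name: my.topic
    num_partitions: 3
    replication_factor: 2
    config:
      cleanup.policy: compact
"""
)
assert [t["name"] for t in sample["topics"]] == ["my.topic"]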
def produce(
    state: State,
    topic: str,
    to_context: str,
    directory: str,
    avro: bool,
    binary: bool,
    match: str = None,
    read_from_stdin: bool = False,
    ignore_stdin_errors: bool = False,
):
    """Produce messages to a topic.

    Write messages to a given topic in a given context. These messages can come from
    either a directory <directory> that was previously written to with "esque consume"
    or from JSON objects coming in via STDIN.

    If reading from STDIN, the data is expected as single-line JSON objects with the
    message key and the message value always being a string. The --avro option is
    currently not supported when reading from STDIN. With the --binary option those
    strings are expected to contain base64-encoded binary data. By default, the data
    in the messages is treated as utf-8 encoded strings and will be used as-is.
    In addition to "key" and "value" one can also define headers as a list of objects
    with a "key" and a "value" attribute, the former being a string and the latter
    being a string, "null" or simply not defined.

    \b
    So valid JSON objects for reading from stdin would be:
    {"key": "foo", "value": "bar", "headers":[{"key":"h1", "value":"v1"},{"key":"h2"}]}
    {"key": "foo", "value": null, "partition": 1}
    {"key": "foo"}

    \b
    EXAMPLES:
    # Write all messages from the files in <directory> to TOPIC in the <destination_ctx> context.
    esque produce -d <directory> -t <destination_ctx> TOPIC

    \b
    # Start environment in terminal to write messages to TOPIC in the <destination_ctx> context.
    esque produce --stdin -f <destination_ctx> -y TOPIC

    \b
    # Copy source_topic to destination_topic.
    esque consume -f first-context --stdout source_topic | esque produce -t second-context --stdin destination_topic
    """
    if not to_context:
        to_context = state.config.current_context
    state.config.context_switch(to_context)

    if not read_from_stdin:
        if not directory:
            raise ValueError("Need to provide directory if not reading from stdin.")
        directory = pathlib.Path(directory)
    elif avro:
        raise ValueError("Cannot read avro data from stdin. Use a directory instead.")

    if binary and avro:
        raise ValueError("Cannot set data to be interpreted as binary AND avro.")

    topic_controller = Cluster().topic_controller
    if not topic_controller.topic_exists(topic):
        if ensure_approval(f"Topic {topic!r} does not exist, do you want to create it?", no_verify=state.no_verify):
            topic_controller.create_topics([Topic(topic)])
        else:
            click.echo(click.style("Aborted!", bg="red"))
            return

    builder = PipelineBuilder()

    input_handler = create_input_handler(directory, read_from_stdin)
    builder.with_input_handler(input_handler)

    input_message_serializer = create_input_message_serializer(directory, avro, binary)
    builder.with_input_message_serializer(input_message_serializer)

    output_message_serializer = create_output_serializer(avro, binary, topic, state)
    builder.with_output_message_serializer(output_message_serializer)

    output_handler = create_output_handler(to_context, topic)
    builder.with_output_handler(output_handler)

    if match:
        builder.with_stream_decorator(yield_only_matching_messages(match))

    counter, counter_decorator = event_counter()
    builder.with_stream_decorator(counter_decorator)

    pipeline = builder.build()
    pipeline.run_pipeline()

    click.echo(
        green_bold(str(counter.message_count))
        + " messages successfully produced to topic "
        + blue_bold(topic)
        + " in context "
        + blue_bold(to_context)
        + "."
    )
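# The stdin records described in the docstring above are plain single-line JSON;
# for reference, they parse with the standard library alone (sample values only):
import json

record = json.loads('{"key": "foo", "value": null, "partition": 1}')
assert record["key"] == "foo"
assert record["value"] is None
assert record["partition"] == 1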
def ping(state: State, times: int, wait: int):
    """Test the connection to the kafka cluster.

    Ping the kafka cluster by writing messages to and reading messages from it.
    After the specified number of "pings", return the minimum, maximum, and average
    time for the round trip.

    \b
    The abbreviations in the output have the following meaning:
    c2s: client to server (time from creation until kafka wrote it to disk)
    s2c: server to client (time from kafka's write to disk until the client received it again)
    c2c: client to client (complete round trip)
    """
    topic_controller = state.cluster.topic_controller
    if not topic_controller.topic_exists(PING_TOPIC):
        if ensure_approval(
            f"Topic {PING_TOPIC!r} does not exist, do you want to create it?", no_verify=state.no_verify
        ):
            topic_config = {
                "cleanup.policy": "compact,delete",
                "retention.ms": int(datetime.timedelta(days=1).total_seconds() * 1000),
                "message.timestamp.type": "LogAppendTime",
            }
            topic_controller.create_topics([Topic(PING_TOPIC, num_partitions=10, config=topic_config)])
        else:
            click.echo(click.style("Aborted!", bg="red"))
            return

    ping_id = uuid.uuid4().bytes

    click.echo("Initializing producer.")
    output_handler = KafkaHandler(
        KafkaHandlerConfig(scheme="kafka", host=state.config.current_context, path=PING_TOPIC)
    )
    output_handler.write_message(create_tombstone_message(ping_id))

    input_handler = KafkaHandler(
        KafkaHandlerConfig(scheme="kafka", host=state.config.current_context, path=PING_TOPIC)
    )
    input_stream = filter(key_matches(ping_id), skip_stream_events(input_handler.message_stream()))
    message_iterator = iter(input_stream)

    click.echo("Initializing consumer.")
    input_handler.seek(KafkaHandler.OFFSET_AT_LAST_MESSAGE)
    next(message_iterator)

    click.echo(f"Pinging cluster with bootstrap servers {state.cluster.bootstrap_servers}.")
    deltas = []
    try:
        for i in range(times):
            output_handler.write_message(create_ping_message(ping_id))
            msg_received = next(message_iterator)
            dt_created = dt_from_bytes(msg_received.value)
            dt_delivered = msg_received.timestamp
            dt_received = datetime.datetime.now(tz=datetime.timezone.utc)

            # Use total_seconds() so round trips longer than one second aren't
            # truncated to their sub-second component.
            time_client_to_server_ms = (dt_delivered - dt_created).total_seconds() * 1000
            time_server_to_client_ms = (dt_received - dt_delivered).total_seconds() * 1000
            time_client_to_client_ms = (dt_received - dt_created).total_seconds() * 1000
            deltas.append((time_client_to_server_ms, time_server_to_client_ms, time_client_to_client_ms))
            click.echo(
                f"m_seq={i} c2s={time_client_to_server_ms:.2f}ms "
                f"s2c={time_server_to_client_ms:.2f}ms "
                f"c2c={time_client_to_client_ms:.2f}ms"
            )
            sleep(wait)
    except KeyboardInterrupt:
        return

    # make sure our ping messages get cleaned up
    output_handler.write_message(create_tombstone_message(ping_id))

    click.echo("--- statistics ---")
    click.echo(f"{len(deltas)} messages sent/received.")
    c2s_times, s2c_times, c2c_times = zip(*deltas)
    click.echo(f"c2s {stats(c2s_times)}")
    click.echo(f"s2c {stats(s2c_times)}")
    click.echo(f"c2c {stats(c2c_times)}")
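# Why total_seconds() rather than the timedelta.microseconds attribute in the ping
# timings above: .microseconds only holds the sub-second component, so a round trip
# of 1.5 seconds would report as 500 ms. A self-contained check:
import datetime

delta = datetime.timedelta(seconds=1, microseconds=500_000)
assert delta.microseconds == 500_000            # sub-second component only
assert delta.total_seconds() * 1000 == 1500.0   # full duration in milliseconds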