def connect_sink_factory(
    running_cluster_config: Dict[str, str], topic_and_partitions: Tuple[str, int]
) -> ConnectSinkFactory:
    """
    Creates a factory, that can be used to create readily usable instances of
    :class:`test.utils.PyConnectTestSink`.
    If necessary, any config parameter can be overwritten by providing a custom
    config as argument to the factory.
    """
    topic_id, _partitions = topic_and_partitions
    base_config = SinkConfig(
        {
            "bootstrap_servers": running_cluster_config["broker"],
            "schema_registry": running_cluster_config["schema-registry"],
            "offset_commit_interval": 1,
            "group_id": topic_id + "_sink_group_id",
            "poll_timeout": 2,
            "topics": topic_id,
        }
    )

    def make_test_sink(custom_config=None):
        # Merge overrides into a copy so the shared base config stays untouched.
        if custom_config is None:
            effective_config = base_config
        else:
            effective_config = base_config.copy()
            effective_config.update(custom_config)
        sink = PyConnectTestSink(effective_config)
        sink.max_runs = 30
        return sink

    return make_test_sink
def test_sanity_check_failure_subclass(caplog):
    """A negative ``offset_commit_interval`` must make SinkConfig raise SanityError.

    All other fields are valid so the failure is attributable to the one
    bad value only.
    """
    caplog.set_level(logging.DEBUG)
    with pytest.raises(SanityError):
        SinkConfig(
            dict(
                bootstrap_servers="localhost",
                # fixed typo "locahlost" -> "localhost": the intended trigger for
                # the sanity failure is the negative interval below, nothing else
                schema_registry="localhost",
                offset_commit_interval=-1,
                group_id="groupid",
                topics="topics",
            )
        )
def test_sanity_check_success():
    """A fully valid config passes the sanity check and keeps its values."""
    valid_config = SinkConfig(
        {
            "bootstrap_servers": "localhost",
            "schema_registry": "localhost",
            "offset_commit_interval": 1,
            "sink_commit_retry_count": 2,
            "group_id": "groupid",
            "topics": "topics",
        }
    )

    assert valid_config["offset_commit_interval"] == 1
    assert valid_config["sink_commit_retry_count"] == 2
def test_env_loader():
    """``SinkConfig.from_env_variables`` reads PYCONNECT_* environment variables."""
    fake_environment = {
        "PYCONNECT_BOOTSTRAP_SERVERS": "broker:9092",
        "PYCONNECT_SCHEMA_REGISTRY": "schema-registry:8082",
        "PYCONNECT_TOPICS": "testtopic",
        "PYCONNECT_GROUP_ID": "testgroup",
    }

    with mock.patch.dict("pyconnect.config.os.environ", fake_environment):
        loaded = SinkConfig.from_env_variables()

    # list-valued fields are parsed into lists, scalar fields stay strings
    assert loaded["bootstrap_servers"] == ["broker:9092"]
    assert loaded["schema_registry"] == "schema-registry:8082"
    assert loaded["topics"] == ["testtopic"]
    assert loaded["group_id"] == "testgroup"
def sink_factory():
    """Fixture: yields a factory that builds sinks with a mocked consumer.

    The consumer class is patched so no real Kafka connection is made; each
    produced sink polls nothing and has its status check stubbed out.
    """
    config = SinkConfig(
        {
            "bootstrap_servers": "localhost",
            "schema_registry": "localhost",
            "offset_commit_interval": 1,
            "sink_commit_retry_count": 2,
            "group_id": "group_id",
            "poll_timeout": 1,
            "topics": "",
            "unify_logging": False,
        }
    )

    with mock.patch("pyconnect.pyconnectsink.RichAvroConsumer", autospec=True):

        def build_sink():
            sink = PyConnectTestSink(config)
            sink._consumer.poll.return_value = None  # never delivers a message
            sink._check_status = mock.Mock()
            return sink

        yield build_sink
def sink_factory():
    """Fixture: yields a factory that builds sinks with mocked Kafka clients.

    Both the deserializing consumer and the schema-registry client are patched
    out, so produced sinks never touch a real broker or registry.
    """
    config = SinkConfig(
        {
            "bootstrap_servers": "localhost",
            "schema_registry": "localhost",
            "offset_commit_interval": 1,
            "sink_commit_retry_count": 2,
            "group_id": "group_id",
            "poll_timeout": 1,
            "topics": "",
            "unify_logging": False,
            "kafka_opts": {"allow.auto.create.topics": True},
        }
    )

    consumer_patch = mock.patch("pyconnect.pyconnectsink.DeserializingConsumer")
    registry_patch = mock.patch("pyconnect.pyconnectsink.SchemaRegistryClient")
    with consumer_patch, registry_patch:

        def build_sink():
            sink = PyConnectTestSink(config)
            sink._consumer.poll.return_value = None  # never delivers a message
            sink._check_status = mock.Mock()
            return sink

        yield build_sink
def test_host_splitting():
    """The bootstrap_servers string is split on commas, trimmed and unquoted."""
    raw_servers = (
        'localhost,otherhost:1234/asdf, "user:pw@yetanotherhost/blubb",'
        " there-is-more/where/that/came%20/from?blah=blubb&foo=bar "
    )
    expected_hosts = [
        "localhost",
        "otherhost:1234/asdf",
        "user:pw@yetanotherhost/blubb",
        "there-is-more/where/that/came%20/from?blah=blubb&foo=bar",
    ]

    config = SinkConfig(
        {
            "bootstrap_servers": raw_servers,
            "schema_registry": "localhost",
            "offset_commit_interval": 1,
            "sink_commit_retry_count": 2,
            "group_id": "groupid",
            "topics": "topics",
        }
    )

    assert config["bootstrap_servers"] == expected_hosts