def test_collect_hosts__happy_path():
    """Plain IPv4 "host:port" entries parse; the default port 9092 is applied."""
    hosts = "127.0.0.1:1234,127.0.0.1"

    results = collect_hosts(hosts)

    # Set literal instead of set([...]) (ruff C405); order is unspecified.
    assert set(results) == {
        ('127.0.0.1', 1234, socket.AF_INET),
        ('127.0.0.1', 9092, socket.AF_INET),
    }
def test_collect_hosts__with_spaces():
    """Whitespace after the separating comma is tolerated."""
    hosts = "localhost:1234, localhost"

    results = collect_hosts(hosts)

    # Set literal instead of set([...]) (ruff C405).
    assert set(results) == {
        ('localhost', 1234, socket.AF_UNSPEC),
        ('localhost', 9092, socket.AF_UNSPEC),
    }
def test_collect_hosts__ipv6():
    """Bracketed hosts are treated as IPv6; default port 9092 applies."""
    hosts = "[localhost]:1234,[2001:1000:2000::1],[2001:1000:2000::1]:1234"

    results = collect_hosts(hosts)

    # Set literal instead of set([...]) (ruff C405).
    assert set(results) == {
        ('localhost', 1234, socket.AF_INET6),
        ('2001:1000:2000::1', 9092, socket.AF_INET6),
        ('2001:1000:2000::1', 1234, socket.AF_INET6),
    }
def test_collect_hosts__happy_path(self):
    """Plain IPv4 "host:port" entries parse; the default port 9092 is applied."""
    hosts = "127.0.0.1:1234,127.0.0.1"

    results = collect_hosts(hosts)

    # Set literal instead of set([...]) (ruff C405); order is unspecified.
    self.assertEqual(set(results), {
        ('127.0.0.1', 1234, socket.AF_INET),
        ('127.0.0.1', 9092, socket.AF_INET),
    })
def test_collect_hosts__with_spaces(self):
    """Whitespace after the separating comma is tolerated."""
    hosts = "localhost:1234, localhost"

    results = collect_hosts(hosts)

    # Set literal instead of set([...]) (ruff C405).
    self.assertEqual(set(results), {
        ('localhost', 1234),
        ('localhost', 9092),
    })
def test_collect_hosts__happy_path(self):
    """Hostname "host:port" entries parse; the default port 9092 is applied."""
    hosts = "localhost:1234,localhost"

    results = collect_hosts(hosts)

    # Set literal instead of set([...]) (ruff C405).
    self.assertEqual(set(results), {
        ('localhost', 1234, socket.AF_INET),
        ('localhost', 9092, socket.AF_INET),
    })
def _generate_bootstrap_brokers(self):
    """Build a node_id -> BrokerMetadata map for the configured bootstrap servers."""
    # collect_hosts does not perform DNS, so we should be fine to re-use
    bootstrap_hosts = collect_hosts(self.config["bootstrap_servers"])
    labelled = (
        ("bootstrap-%s" % index, host, port)
        for index, (host, port, _afi) in enumerate(bootstrap_hosts)
    )
    return {
        node_id: BrokerMetadata(node_id, host, port, None)
        for node_id, host, port in labelled
    }
def _generate_bootstrap_brokers(self):
    """Return {node_id: BrokerMetadata} derived from self.config['bootstrap_servers']."""
    # collect_hosts does not perform DNS, so we should be fine to re-use
    result = {}
    index = 0
    for host, port, _afi in collect_hosts(self.config['bootstrap_servers']):
        node_id = 'bootstrap-%s' % index
        result[node_id] = BrokerMetadata(node_id, host, port, None)
        index += 1
    return result
def _generate_bootstrap_brokers(self):
    """Yield BrokerMetadata for the bootstrap servers, cycling indefinitely.

    DNS resolution happens inside the loop, so every pass over the host
    list re-resolves each name.
    """
    # collect_hosts does not perform DNS, so we should be fine to re-use
    bootstrap_hosts = collect_hosts(self.config['bootstrap_servers'])
    while True:
        for host, port, afi in bootstrap_hosts:
            for record in dns_lookup(host, port, afi):
                # getaddrinfo-style record: sockaddr is the fifth element.
                sockaddr = record[4]
                yield BrokerMetadata('bootstrap', sockaddr[0], sockaddr[1], None)
def test_collect_hosts__ipv6(self):
    """Bracketed hosts are treated as IPv6; default port 9092 applies."""
    hosts = "[localhost]:1234,[2001:1000:2000::1],[2001:1000:2000::1]:1234"

    results = collect_hosts(hosts)

    # Set literal instead of set([...]) (ruff C405).
    self.assertEqual(set(results), {
        ('localhost', 1234, socket.AF_INET6),
        ('2001:1000:2000::1', 9092, socket.AF_INET6),
        ('2001:1000:2000::1', 1234, socket.AF_INET6),
    })
def test_collect_hosts__happy_path(self):
    """Hostname "host:port" entries parse; the default port 9092 is applied."""
    hosts = "localhost:1234,localhost"

    results = collect_hosts(hosts)

    # Set literal instead of set([...]) (ruff C405).
    self.assertEqual(
        set(results),
        {
            ('localhost', 1234, socket.AF_INET),
            ('localhost', 9092, socket.AF_INET),
        })
def test_collect_hosts__ipv6(self):
    """Bracketed hosts are treated as IPv6; default port 9092 applies."""
    hosts = "[localhost]:1234,[2001:1000:2000::1],[2001:1000:2000::1]:1234"

    results = collect_hosts(hosts)

    # Set literal instead of set([...]) (ruff C405).
    self.assertEqual(
        set(results),
        {
            ('localhost', 1234, socket.AF_INET6),
            ('2001:1000:2000::1', 9092, socket.AF_INET6),
            ('2001:1000:2000::1', 1234, socket.AF_INET6),
        })
def __init__(self, hosts, *, client_id=CLIENT_ID, loop):
    """Set up client state: bootstrap hosts, request counter, and empty caches."""
    self._loop = loop
    self._client_id = client_id
    self._hosts = collect_hosts(hosts)
    self._request_id = 0
    # Connection pool and metadata caches, filled lazily.
    self._conns = {}
    self._brokers = {}              # broker_id -> BrokerMetadata
    self._topics_to_brokers = {}    # TopicAndPartition -> BrokerMetadata
    self._topic_partitions = {}     # topic -> partition -> PartitionMetadata
def test_collect_hosts__string_list(self):
    """A list of host strings is accepted as well as a comma-joined string."""
    hosts = [
        'localhost:1234',
        'localhost',
    ]

    results = collect_hosts(hosts)

    # Set literal instead of set([...]) (ruff C405).
    self.assertEqual(set(results), {
        ('localhost', 1234),
        ('localhost', 9092),
    })
def __init__(self, hosts, client_id=CLIENT_ID,
             timeout=DEFAULT_SOCKET_TIMEOUT_SECONDS):
    """Create a client and immediately bootstrap cluster metadata.

    hosts: bootstrap servers, passed through collect_hosts().
    client_id: identifier attached to this client's requests.
    timeout: socket timeout (seconds, per the default's name) — TODO confirm unit.
    """
    # We need one connection to bootstrap
    self.client_id = client_id
    self.timeout = timeout
    self.hosts = collect_hosts(hosts)

    # create connections only when we need them
    self.conns = {}
    # Metadata caches populated by load_metadata_for_topics().
    self.brokers = {}  # broker_id -> BrokerMetadata
    self.topics_to_brokers = {}  # topic_id -> broker_id
    self.topic_partitions = {}  # topic_id -> [0, 1, 2, ...]
    self.load_metadata_for_topics()  # bootstrap with all metadata
def __init__(self, **configs):
    """Initialize the client from DEFAULT_CONFIG overridden by **configs.

    Only keys already present in DEFAULT_CONFIG are honored; unknown
    keys in *configs* are silently ignored.
    """
    self.config = copy.copy(self.DEFAULT_CONFIG)
    for key in self.config:
        if key in configs:
            self.config[key] = configs[key]

    # these properties need to be set on top of the initialization pipeline
    # because they are used when __del__ method is called
    self._closed = False
    # Self-pipe pair used to wake the selector from other threads.
    self._wake_r, self._wake_w = socket.socketpair()
    self._selector = self.config["selector"]()

    self.cluster = ClusterMetadata(**self.config)
    self._topics = set()  # empty set will fetch all topic metadata
    self._metadata_refresh_in_progress = False
    self._conns = Dict()  # object to support weakrefs
    self._api_versions = None
    self._connecting = set()
    self._sending = set()
    self._refresh_on_disconnects = True
    self._last_bootstrap = 0
    self._bootstrap_fails = 0
    self._wake_r.setblocking(False)
    # Writer side gets a timeout so wakeup() cannot block indefinitely.
    self._wake_w.settimeout(self.config["wakeup_timeout_ms"] / 1000.0)
    self._wake_lock = threading.Lock()

    self._lock = threading.RLock()

    # when requests complete, they are transferred to this queue prior to
    # invocation. The purpose is to avoid invoking them while holding the
    # lock above.
    self._pending_completion = collections.deque()

    self._selector.register(self._wake_r, selectors.EVENT_READ)
    self._idle_expiry_manager = IdleConnectionManager(
        self.config["connections_max_idle_ms"])
    self._sensors = None
    if self.config["metrics"]:
        self._sensors = KafkaClientMetrics(
            self.config["metrics"],
            self.config["metric_group_prefix"],
            weakref.proxy(self._conns),
        )

    self._num_bootstrap_hosts = len(
        collect_hosts(self.config["bootstrap_servers"]))

    # Check Broker Version if not set explicitly
    if self.config["api_version"] is None:
        check_timeout = self.config["api_version_auto_timeout_ms"] / 1000
        self.config["api_version"] = self.check_version(
            timeout=check_timeout)
def __init__(self, hosts, client_id=CLIENT_ID,
             timeout=DEFAULT_SOCKET_TIMEOUT_SECONDS,
             correlation_id=0):
    """Create a client and bootstrap metadata from the given hosts."""
    # A single connection is enough to bootstrap.
    self.client_id = client_id
    self.timeout = timeout
    self.correlation_id = correlation_id
    self.hosts = collect_hosts(hosts)
    self._conns = {}
    # Metadata caches, filled by the bootstrap call below.
    self.brokers = {}             # broker_id -> BrokerMetadata
    self.topics_to_brokers = {}   # TopicPartition -> BrokerMetadata
    self.topic_partitions = {}    # topic -> partition -> PartitionMetadata
    self.load_metadata_for_topics()  # bootstrap with all metadata
def __init__(self, **configs):
    """Initialize the client from DEFAULT_CONFIG overridden by **configs.

    Only keys already present in DEFAULT_CONFIG are honored; unknown
    keys in *configs* are silently ignored.  Bootstraps connections and,
    if api_version is unset, probes the brokers for it.
    """
    self.config = copy.copy(self.DEFAULT_CONFIG)
    for key in self.config:
        if key in configs:
            self.config[key] = configs[key]

    # Reject unsupported api_version values up front.
    if self.config['api_version'] is not None:
        assert self.config['api_version'] in self.API_VERSIONS, (
            'api_version [{0}] must be one of: {1}'.format(
                self.config['api_version'], str(self.API_VERSIONS)))

    self.cluster = ClusterMetadata(**self.config)
    self._topics = set()  # empty set will fetch all topic metadata
    self._metadata_refresh_in_progress = False
    self._selector = self.config['selector']()
    self._conns = Dict()  # object to support weakrefs
    self._connecting = set()
    self._refresh_on_disconnects = True
    self._last_bootstrap = 0
    self._bootstrap_fails = 0
    # Self-pipe pair used to wake the selector from other threads.
    self._wake_r, self._wake_w = socket.socketpair()
    self._wake_r.setblocking(False)
    self._wake_lock = threading.Lock()

    self._lock = threading.RLock()

    # when requests complete, they are transferred to this queue prior to
    # invocation. The purpose is to avoid invoking them while holding the
    # lock above.
    self._pending_completion = collections.deque()

    self._selector.register(self._wake_r, selectors.EVENT_READ)
    self._idle_expiry_manager = IdleConnectionManager(
        self.config['connections_max_idle_ms'])
    self._closed = False
    self._sensors = None
    if self.config['metrics']:
        self._sensors = KafkaClientMetrics(
            self.config['metrics'], self.config['metric_group_prefix'],
            weakref.proxy(self._conns))

    self._bootstrap(collect_hosts(self.config['bootstrap_servers']))

    # Check Broker Version if not set explicitly
    if self.config['api_version'] is None:
        check_timeout = self.config['api_version_auto_timeout_ms'] / 1000
        self.config['api_version'] = self.check_version(
            timeout=check_timeout)
def __init__(self, hosts, client_id=CLIENT_ID,
             timeout=DEFAULT_SOCKET_TIMEOUT_SECONDS,
             correlation_id=0, metrics=None):
    """Create a client, tag its bootstrap hosts, and load cluster metadata."""
    # A single connection is enough to bootstrap.
    self.client_id = client_id
    self.timeout = timeout
    self.correlation_id = correlation_id
    # Append a marker element so bootstrap hosts are distinguishable.
    self.hosts = [entry + ('bootstrap',) for entry in collect_hosts(hosts)]
    self._metrics_registry = metrics
    self.metrics = SimpleClientMetrics(metrics or Metrics())
    self._conns = {}
    # Metadata caches, filled by the bootstrap call below.
    self.brokers = {}             # broker_id -> BrokerMetadata
    self.topics_to_brokers = {}   # TopicPartition -> BrokerMetadata
    self.topic_partitions = {}    # topic -> partition -> leader
    self.load_metadata_for_topics()  # bootstrap with all metadata
def __init__(self, hosts, client_id=CLIENT_ID,
             timeout=DEFAULT_SOCKET_TIMEOUT_SECONDS,
             correlation_id=0, metrics=None):
    """Create a client with optional metrics and load cluster metadata."""
    # A single connection is enough to bootstrap.
    self.client_id = client_id
    self.timeout = timeout
    self.correlation_id = correlation_id
    self.hosts = collect_hosts(hosts)
    self._metrics_registry = metrics
    self.metrics = SimpleClientMetrics(metrics or Metrics())
    self._conns = {}
    # Metadata caches, filled by the bootstrap call below.
    self.brokers = {}             # broker_id -> BrokerMetadata
    self.topics_to_brokers = {}   # TopicPartition -> BrokerMetadata
    self.topic_partitions = {}    # topic -> partition -> leader
    self.load_metadata_for_topics()  # bootstrap with all metadata
def __init__(self, hosts, client_id=CLIENT_ID,
             timeout=DEFAULT_SOCKET_TIMEOUT_SECONDS,
             correlation_id=0, sslopts=None):
    """Create a client (optionally with SSL options) and bootstrap metadata.

    hosts: bootstrap servers, passed through collect_hosts().
    client_id: identifier for this client; coerced via kafka_bytestring().
    timeout: socket timeout (seconds, per the default's name) — TODO confirm unit.
    correlation_id: starting correlation id for requests.
    sslopts: SSL options, or None — presumably passed to connections; verify.
    """
    # We need one connection to bootstrap
    self.client_id = kafka_bytestring(client_id)
    self.timeout = timeout
    self.hosts = collect_hosts(hosts)
    self.correlation_id = correlation_id
    self.sslopts = sslopts

    # create connections only when we need them
    self.conns = {}
    # Metadata caches populated by load_metadata_for_topics().
    self.brokers = {}  # broker_id -> BrokerMetadata
    self.topics_to_brokers = {}  # TopicAndPartition -> BrokerMetadata
    self.topic_partitions = {}  # topic -> partition -> PartitionMetadata
    self.load_metadata_for_topics()  # bootstrap with all metadata
def test_collect_hosts__string_list():
    """Lists mix plain, bracketed, and IPv6 entries; families are inferred."""
    hosts = [
        'localhost:1234',
        'localhost',
        '[localhost]',
        '2001::1',
        '[2001::1]',
        '[2001::1]:1234',
    ]

    results = collect_hosts(hosts)

    # Set literal instead of set([...]) (ruff C405).  Note: '2001::1' and
    # '[2001::1]' both yield ('2001::1', 9092, AF_INET6), so the expected
    # set collapses that duplicate.
    assert set(results) == {
        ('localhost', 1234, socket.AF_UNSPEC),
        ('localhost', 9092, socket.AF_UNSPEC),
        ('localhost', 9092, socket.AF_INET6),
        ('2001::1', 9092, socket.AF_INET6),
        ('2001::1', 1234, socket.AF_INET6),
    }
def test_collect_hosts__string_list(self):
    """Lists mix plain, bracketed, and IPv6 entries; families are inferred."""
    hosts = [
        'localhost:1234',
        'localhost',
        '[localhost]',
        '2001::1',
        '[2001::1]:1234',
    ]

    results = collect_hosts(hosts)

    # Set literal instead of set([...]) (ruff C405).
    self.assertEqual(set(results), {
        ('localhost', 1234, socket.AF_INET),
        ('localhost', 9092, socket.AF_INET),
        ('localhost', 9092, socket.AF_INET6),
        ('2001::1', 9092, socket.AF_INET6),
        ('2001::1', 1234, socket.AF_INET6),
    })
def test_collect_hosts__string_list(self):
    """Lists mix plain, bracketed, and IPv6 entries; families are inferred."""
    hosts = [
        'localhost:1234',
        'localhost',
        '[localhost]',
        '2001::1',
        '[2001::1]:1234',
    ]

    results = collect_hosts(hosts)

    # Set literal instead of set([...]) (ruff C405).
    self.assertEqual(
        set(results),
        {
            ('localhost', 1234, socket.AF_INET),
            ('localhost', 9092, socket.AF_INET),
            ('localhost', 9092, socket.AF_INET6),
            ('2001::1', 9092, socket.AF_INET6),
            ('2001::1', 1234, socket.AF_INET6),
        })
def __init__(self, hosts, client_id=CLIENT_ID,
             timeout=DEFAULT_SOCKET_TIMEOUT_SECONDS,
             correlation_id=0, metrics_responder=None):
    """Create a client (optionally with a metrics responder) and bootstrap metadata.

    hosts: bootstrap servers, passed through collect_hosts().
    client_id: identifier for this client; coerced via kafka_bytestring().
    timeout: socket timeout (seconds, per the default's name) — TODO confirm unit.
    correlation_id: starting correlation id for requests.
    metrics_responder: optional metrics hook — semantics not visible here; verify.
    """
    # We need one connection to bootstrap
    self.client_id = kafka_bytestring(client_id)
    self.timeout = timeout
    self.hosts = collect_hosts(hosts)
    self.correlation_id = correlation_id
    self.metrics_responder = metrics_responder

    # create connections only when we need them
    self.conns = {}
    # Metadata caches populated by load_metadata_for_topics().
    self.brokers = {}  # broker_id -> BrokerMetadata
    self.topics_to_brokers = {}  # TopicAndPartition -> BrokerMetadata
    self.topic_partitions = {}  # topic -> partition -> PartitionMetadata
    self.load_metadata_for_topics()  # bootstrap with all metadata
def __init__(self, **configs):
    """Initialize the client from DEFAULT_CONFIG overridden by **configs.

    Only keys already present in DEFAULT_CONFIG are honored; unknown
    keys in *configs* are silently ignored.  Bootstraps connections and,
    if api_version is unset, probes the brokers for it.
    """
    self.config = copy.copy(self.DEFAULT_CONFIG)
    for key in self.config:
        if key in configs:
            self.config[key] = configs[key]

    self.cluster = ClusterMetadata(**self.config)
    self._topics = set()  # empty set will fetch all topic metadata
    self._metadata_refresh_in_progress = False
    self._selector = self.config['selector']()
    self._conns = Dict()  # object to support weakrefs
    self._connecting = set()
    self._refresh_on_disconnects = True
    self._last_bootstrap = 0
    self._bootstrap_fails = 0
    # Self-pipe pair used to wake the selector from other threads.
    self._wake_r, self._wake_w = socket.socketpair()
    self._wake_r.setblocking(False)
    self._wake_lock = threading.Lock()

    self._lock = threading.RLock()

    # when requests complete, they are transferred to this queue prior to
    # invocation. The purpose is to avoid invoking them while holding the
    # lock above.
    self._pending_completion = collections.deque()

    self._selector.register(self._wake_r, selectors.EVENT_READ)
    self._idle_expiry_manager = IdleConnectionManager(self.config['connections_max_idle_ms'])
    self._closed = False
    self._sensors = None
    if self.config['metrics']:
        self._sensors = KafkaClientMetrics(self.config['metrics'], self.config['metric_group_prefix'], weakref.proxy(self._conns))

    self._bootstrap(collect_hosts(self.config['bootstrap_servers']))

    # Check Broker Version if not set explicitly
    if self.config['api_version'] is None:
        check_timeout = self.config['api_version_auto_timeout_ms'] / 1000
        self.config['api_version'] = self.check_version(timeout=check_timeout)
def __init__(self, hosts, client_id=CLIENT_ID,
             timeout=DEFAULT_SOCKET_TIMEOUT_SECONDS,
             ip_mapping_file=None):
    """Create a client, optionally loading an IP mapping, and bootstrap metadata.

    hosts: bootstrap servers, passed through collect_hosts().
    client_id: identifier for this client's requests.
    timeout: socket timeout (seconds, per the default's name) — TODO confirm unit.
    ip_mapping_file: optional path handed to collect_ip_mapping(); when
        None, self.ip_mapping stays None.
    """
    # We need one connection to bootstrap
    self.client_id = client_id
    self.timeout = timeout
    self.hosts = collect_hosts(hosts)

    if ip_mapping_file is not None:
        self.ip_mapping = collect_ip_mapping(ip_mapping_file)
        # Lazy %-args: the mapping is only rendered if the record is emitted.
        log.info("initialized with ip mapping: %s", self.ip_mapping)
    else:
        self.ip_mapping = None
        # Fixed typo ("intialize") and tense to match the branch above.
        log.info("initialized without ip mapping")

    # create connections only when we need them
    self.conns = {}
    # Metadata caches populated by load_metadata_for_topics().
    self.brokers = {}  # broker_id -> BrokerMetadata
    self.topics_to_brokers = {}  # TopicAndPartition -> BrokerMetadata
    self.topic_partitions = {}  # topic -> partition -> PartitionMetadata
    self.load_metadata_for_topics()  # bootstrap with all metadata
def hosts(self):
    """Host tuples parsed from the configured bootstrap servers."""
    servers = self._bootstrap_servers
    return collect_hosts(servers)