def __init__(self, options, **kargs):
    """Configure the TESS MQTT subscriber service from *options*."""
    self.options = options
    self.topics = []
    self.regAllowed = False
    self.validate = options['validation']
    setLogLevel(namespace=NAMESPACE, levelStr=options['log_level'])
    setLogLevel(namespace=PROTOCOL_NAMESPACE, levelStr=options['protocol_log_level'])
    # Split each configured topic once, keeping its first ("head") and
    # third ("tail") path component.
    topic_parts = [topic.split('/') for topic in self.options['tess_topics']]
    self.tess_heads = [parts[0] for parts in topic_parts]
    self.tess_tails = [parts[2] for parts in topic_parts]
    self.factory = MQTTFactory(profile=MQTTFactory.SUBSCRIBER)
    self.endpoint = clientFromString(reactor, self.options['broker'])
    # An empty username means anonymous access: blank out both credentials.
    if self.options['username'] == "":
        self.options['username'] = None
        self.options['password'] = None
    self.resetCounters()
    ClientService.__init__(self, self.endpoint, self.factory,
                           retryPolicy=backoffPolicy(initialDelay=INITIAL_DELAY,
                                                     factor=FACTOR,
                                                     maxDelay=MAX_DELAY))
def retry(failed_attempts):
    """Retry policy: keep backing off while there is retry budget left.

    Retries only if at least one connection ever succeeded, so a host
    that was never reachable fails fast instead of looping forever.
    """
    if self._connect_successes > 0 and (
            self.max_retries == -1 or failed_attempts < self.max_retries):
        # Delegate the actual delay computation to Twisted's backoff policy.
        return backoffPolicy(**kwargs)(failed_attempts)
    # Out of retry budget (or never connected): stop the service and return
    # an effectively-infinite delay so no further attempt is scheduled.
    self.stop()
    return 100000000000000
def connect():
    """Connect an AMP client to the local daemon on 127.0.0.1:8750.

    Returns a Deferred that fires with the connected protocol.
    """
    local_endpoint = TCP4ClientEndpoint(reactor, '127.0.0.1', 8750)
    amp_factory = Factory()
    amp_factory.protocol = amp.AMP
    client_service = ClientService(local_endpoint, amp_factory,
                                   retryPolicy=backoffPolicy(0.5, 15.0))
    client_service.startService()
    return client_service.whenConnected()
def test_does_not_overflow_with_large_factor_value(self):
    """
    Any L{OverflowError} raised inside L{backoffPolicy()} with unusual
    parameters is caught, and L{maxDelay} is returned instead.
    """
    policy = backoffPolicy(1.0, 60.0, 1e10, jitter=lambda: 1)
    self.assertEqual(policy(1751), 61)
def __init(self, endpoint, factory):
    # NOTE(review): the name '__init' looks like a typo for '__init__' — as
    # written this method is never invoked as the constructor; confirm
    # against instantiation sites before renaming.
    """ Create MQTT client service """
    ClientService.__init__(self, endpoint, factory, retryPolicy=backoffPolicy())
def __init__(self, endpoint, factory, reactor=None):
    """Create the client service; retries use Twisted's default backoff policy."""
    super().__init__(endpoint, factory, retryPolicy=backoffPolicy(), clock=reactor)
    self.reactor = _maybeGlobalReactor(reactor)
    self.config = Config()
    # Populated once a connection is established.
    self.protocol = None
    self._system_name = None
def __init__(self, reactor):
    """Build an MQTT publisher client service pointed at BROKER."""
    self.reactor = reactor
    publisher_factory = MQTTFactory(profile=MQTTFactory.PUBLISHER)
    broker_endpoint = clientFromString(reactor, BROKER)
    ClientService.__init__(self, broker_endpoint, publisher_factory,
                           retryPolicy=backoffPolicy())
    self.connected = False
def __init__(self, host, port, topic, user, password):
    """MQTT publisher service for a single broker/topic pair."""
    self.broker_url = "tcp:%s:%s" % (host, port)
    self.topic = topic
    self.user = user
    self.password = password
    self.connected = False
    publisher_factory = MQTTFactory(profile=MQTTFactory.PUBLISHER)
    broker_endpoint = clientFromString(reactor, self.broker_url)
    ClientService.__init__(self, broker_endpoint, publisher_factory,
                           retryPolicy=backoffPolicy())
def __init__(self, endpoint, factory, config, log, connections):
    """Client service carrying config, a logger and a connection registry."""
    retry_policy = backoffPolicy()
    ClientService.__init__(self, endpoint, factory, retryPolicy=retry_policy)
    self.config = config
    self.log = log
    self.endpoint = endpoint
    self.connections = connections
    # Filled in once the remote peer is known.
    self.host = None
    self.port = None
def start(self):
    """Begin (re)connecting to ZenHub."""
    self.__stopping = False
    client_factory = ZenPBClientFactory()
    self.__service = ClientService(
        self.__endpoint,
        client_factory,
        retryPolicy=backoffPolicy(initialDelay=0.5, factor=3.0),
    )
    self.__service.startService()
    self.__prepForConnection()
def test_calculates_correct_values(self):
    """
    L{backoffPolicy()} yields the expected delays for successive attempts.
    """
    policy = backoffPolicy(1.0, 60.0, 1.5, jitter=lambda: 1)
    for attempt, expected in [(0, 2), (1, 2.5), (10, 58.6650390625)]:
        self.assertAlmostEqual(policy(attempt), expected)
    # Past the cap, the delay is pinned at maxDelay plus the fixed jitter.
    self.assertEqual(policy(20), 61)
    self.assertEqual(policy(100), 61)
def _delay_sequence(max_delay=10.0):
    """
    Internal helper. Yield an endless sequence of retry delays (in
    seconds), computed by Twisted's backoff policy capped at *max_delay*.
    """
    compute_delay = backoffPolicy(maxDelay=max_delay)
    for attempt_number in itertools.count():
        yield compute_delay(attempt_number)
def test_does_not_overflow_on_high_attempts(self):
    """
    L{backoffPolicy()} does not fail for large values of the attempt
    parameter. In previous versions, this test failed when attempt was
    larger than 1750. See https://twistedmatrix.com/trac/ticket/9476
    """
    policy = backoffPolicy(1.0, 60.0, 1.5, jitter=lambda: 1)
    for attempts in (1751, 1000000):
        self.assertEqual(policy(attempts), 61)
def start(self):
    """Start connecting to ZenHub; retries back off from 0.5s by factor 3."""
    self.__stopping = False
    self.__service = ClientService(
        self.__endpoint,
        ZenPBClientFactory(),
        retryPolicy=backoffPolicy(initialDelay=0.5, factor=3.0),
    )
    self.__service.startService()
    self.__prepForConnection()
def SetupConnection(self, host, port):
    """Open a self-reconnecting TCP connection to a Neo node at host:port."""
    self.__log.debug("Setting up connection! %s %s " % (host, port))
    node_factory = Factory.forProtocol(NeoNode)
    node_endpoint = clientFromString(
        reactor, "tcp:host=%s:port=%s:timeout=5" % (host, port))
    connecting_service = ClientService(
        node_endpoint,
        node_factory,
        retryPolicy=backoffPolicy(.5, factor=3.0)
    )
    connecting_service.startService()
def setUp(self):
    """Wire a NotificationSource to a live RabbitMQ broker for integration tests."""
    super(NotificationSourceIntegrationTest, self).setUp()
    self.endpoint = AMQEndpoint(
        reactor, self.rabbit.config.hostname, self.rabbit.config.port,
        username="******", password="******", heartbeat=1)
    # Zero initial delay so reconnect attempts in tests are immediate.
    self.policy = backoffPolicy(initialDelay=0)
    self.factory = AMQFactory(spec=AMQP0_8_SPEC_PATH)
    self.service = ClientService(
        self.endpoint, self.factory, retryPolicy=self.policy)
    self.connector = NotificationConnector(self.service)
    self.source = NotificationSource(self.connector)
    # NOTE(review): the yields imply this setUp runs under inlineCallbacks
    # (or similar deferred-generator machinery) — confirm the decorator,
    # which is outside this view.
    self.client = yield self.endpoint.connect(self.factory)
    self.channel = yield self.client.channel(1)
    yield self.channel.channel_open()
    yield self.channel.queue_declare(queue="uuid")
    self.service.startService()
def __init__(self, options, **kargs):
    """Configure the MQTT publisher service from *options*."""
    self.options = options
    setLogLevel(namespace=NAMESPACE, levelStr=options['log_level'])
    setLogLevel(namespace=PROTOCOL_NAMESPACE, levelStr=options['log_messages'])
    self.factory = MQTTFactory(profile=MQTTFactory.PUBLISHER)
    self.endpoint = clientFromString(reactor, self.options['broker'])
    self.task = None
    # An empty username means anonymous access: blank out both credentials.
    if self.options['username'] == "":
        self.options['username'] = None
        self.options['password'] = None
    retry_policy = backoffPolicy(initialDelay=INITIAL_DELAY,
                                 factor=FACTOR,
                                 maxDelay=MAX_DELAY)
    ClientService.__init__(self, self.endpoint, self.factory,
                           retryPolicy=retry_policy)
    self.queue = DeferredQueue()
def __init__(self, reactor, config):
    """Build the MQTT client service from a config mapping.

    Expects config keys: 'endpoint', 'version', 'client_id', 'username',
    'app_key' (as demonstrated by the lookups below).
    """
    self.endpoint = clientFromString(reactor, config["endpoint"])
    self.factory = Factory.forProtocol(MQTTProtocol)
    self.version = VERSION[config["version"]]
    self.clientId = config["client_id"]
    self.username = config["username"]
    self.appKey = config["app_key"]
    # Set once a connection is established.
    self.protocol = None
    # In flight subscribe request
    self.subscribe_requests = {}
    # Map topic and related function
    self.topics = {}
    # Map of publish waiting for ack
    self.publish_requests = {}
    ClientService.__init__(self, self.endpoint, self.factory,
                           retryPolicy=backoffPolicy())
def setUp(self):
    """Connect a NotificationSource to a live RabbitMQ broker for this test."""
    super(NotificationSourceIntegrationTest, self).setUp()
    self.endpoint = AMQEndpoint(reactor, self.rabbit.config.hostname,
                                self.rabbit.config.port, username="******",
                                password="******", heartbeat=1)
    # Zero initial delay keeps reconnects immediate under test.
    self.policy = backoffPolicy(initialDelay=0)
    self.factory = AMQFactory(spec=AMQP0_8_SPEC_PATH)
    self.service = ClientService(self.endpoint, self.factory,
                                 retryPolicy=self.policy)
    self.connector = NotificationConnector(self.service)
    self.source = NotificationSource(self.connector)
    # NOTE(review): the yields imply a deferred-generator setUp
    # (e.g. inlineCallbacks) — confirm the decorator outside this view.
    self.client = yield self.endpoint.connect(self.factory)
    self.channel = yield self.client.channel(1)
    yield self.channel.channel_open()
    yield self.channel.queue_declare(queue="uuid")
    self.service.startService()
def __init__(self, options, reference):
    """Photometer client service; *reference* selects the reference instrument."""
    self.options = options
    # NOTE(review): 'ref.' carries a trailing dot while 'test' does not —
    # confirm the asymmetry is intentional (it shapes logger namespaces).
    self.namespace = 'ref.' if reference else 'test'
    self.label = self.namespace.upper()
    setLogLevel(namespace=self.label, levelStr=options['log_messages'])
    setLogLevel(namespace=self.namespace, levelStr=options['log_level'])
    self.log = Logger(namespace=self.namespace)
    self.reference = reference  # Flag, is this instance for the reference photometer
    self.factory = self.buildFactory()
    self.protocol = None
    self.serport = None
    self.info = None  # Photometer info
    self.buffer = CircularBuffer(options['size'], self.log)
    parts = chop(self.options['endpoint'], sep=':')
    # NOTE(review): base-class initialisation appears to happen only for
    # 'tcp' endpoints; confirm serial endpoints are handled elsewhere.
    if parts[0] == 'tcp':
        endpoint = clientFromString(reactor, self.options['endpoint'])
        ClientService.__init__(self, endpoint, self.factory,
                               retryPolicy=backoffPolicy(initialDelay=0.5,
                                                         factor=3.0))
def __init__(self, endpoint, factory, log, datastore):
    """Client service that keeps its logger and datastore for later use."""
    self.log = log
    self.datastore = datastore
    ClientService.__init__(self, endpoint, factory,
                           retryPolicy=backoffPolicy())
def __init(self, endpoint, factory):
    # NOTE(review): '__init' looks like a typo for '__init__'; as written it
    # is never invoked as the constructor — confirm against callers.
    ClientService.__init__(self, endpoint, factory, retryPolicy=backoffPolicy())
def policy(attempt):
    """Backoff policy that also stops the reactor once maxRetries is reached."""
    # NOTE(review): after reactor.stop() execution still falls through and
    # returns one more delay; also maxRetries == 0 (falsy) disables the cap
    # entirely — confirm both are intended.
    if maxRetries and attempt >= maxRetries:
        reactor.stop()
    return backoffPolicy()(attempt)
def run(self, make, start_reactor: bool = True, auto_reconnect: bool = False,
        log_level: str = 'info',
        endpoint: Optional[IStreamClientEndpoint] = None,
        reactor: Optional[IReactorCore] = None) -> Union[type(None), Deferred]:
    """
    Run the application component.

    :param make: A factory that produces instances of
        :class:`autobahn.twisted.wamp.ApplicationSession` when called with an
        instance of :class:`autobahn.wamp.types.ComponentConfig`.

    :param start_reactor: When ``True`` (the default) this method starts the Twisted
        reactor and doesn't return until the reactor stops. If there are any
        problems starting the reactor or connect()-ing, we stop the reactor and
        raise the exception back to the caller.

    :param auto_reconnect:

    :param log_level:

    :param endpoint:

    :param reactor:

    :return: None is returned, unless you specify ``start_reactor=False`` in which
        case the Deferred that connect() returns is returned; this will callback()
        with an IProtocol instance, which will actually be an instance of
        :class:`WampWebSocketClientProtocol`
    """
    self.log.debug('{klass}.run()', klass=self.__class__.__name__)

    if start_reactor:
        # only select framework, set loop and start logging when we are asked
        # start the reactor - otherwise we are running in a program that likely
        # already tool care of all this.
        from twisted.internet import reactor
        txaio.use_twisted()
        txaio.config.loop = reactor
        txaio.start_logging(level=log_level)

    if callable(make):
        # factory for use ApplicationSession
        def create():
            cfg = ComponentConfig(self.realm, self.extra, runner=self)
            try:
                session = make(cfg)
            except Exception:
                self.log.failure(
                    'ApplicationSession could not be instantiated: {log_failure.value}'
                )
                if start_reactor and reactor.running:
                    reactor.stop()
                raise
            else:
                return session
    else:
        create = make

    if self.url.startswith('rs'):
        # try to parse RawSocket URL
        isSecure, host, port = parse_rs_url(self.url)

        # use the first configured serializer if any (which means, auto-choose "best")
        serializer = self.serializers[0] if self.serializers else None

        # create a WAMP-over-RawSocket transport client factory
        transport_factory = WampRawSocketClientFactory(
            create, serializer=serializer)
    else:
        # try to parse WebSocket URL
        isSecure, host, port, resource, path, params = parse_ws_url(
            self.url)

        # create a WAMP-over-WebSocket transport client factory
        transport_factory = WampWebSocketClientFactory(
            create, url=self.url, serializers=self.serializers,
            proxy=self.proxy, headers=self.headers)

        # client WebSocket settings - similar to:
        # - http://crossbar.io/docs/WebSocket-Compression/#production-settings
        # - http://crossbar.io/docs/WebSocket-Options/#production-settings

        # The permessage-deflate extensions offered to the server
        offers = [PerMessageDeflateOffer()]

        # Function to accept permessage-deflate responses from the server
        def accept(response):
            if isinstance(response, PerMessageDeflateResponse):
                return PerMessageDeflateResponseAccept(response)

        # default WebSocket options for all client connections
        protocol_options = {
            'version': WebSocketProtocol.DEFAULT_SPEC_VERSION,
            'utf8validateIncoming': True,
            'acceptMaskedServerFrames': False,
            'maskClientFrames': True,
            'applyMask': True,
            'maxFramePayloadSize': 1048576,
            'maxMessagePayloadSize': 1048576,
            'autoFragmentSize': 65536,
            'failByDrop': True,
            'echoCloseCodeReason': False,
            'serverConnectionDropTimeout': 1.,
            'openHandshakeTimeout': 2.5,
            'closeHandshakeTimeout': 1.,
            'tcpNoDelay': True,
            'perMessageCompressionOffers': offers,
            'perMessageCompressionAccept': accept,
            'autoPingInterval': 10.,
            'autoPingTimeout': 5.,
            'autoPingSize': 12,

            # see: https://github.com/crossbario/autobahn-python/issues/1327 and
            # _cancelAutoPingTimeoutCall
            'autoPingRestartOnAnyTraffic': True,
        }

        # let user override above default options
        if self.websocket_options:
            protocol_options.update(self.websocket_options)

        # set websocket protocol options on Autobahn/Twisted protocol factory, from where it will
        # be applied for every Autobahn/Twisted protocol instance from the factory
        transport_factory.setProtocolOptions(**protocol_options)

    # supress pointless log noise
    transport_factory.noisy = False

    if endpoint:
        client = endpoint
    else:
        # if user passed ssl= but isn't using isSecure, we'll never
        # use the ssl argument which makes no sense.
        context_factory = None
        if self.ssl is not None:
            if not isSecure:
                raise RuntimeError(
                    'ssl= argument value passed to %s conflicts with the "ws:" '
                    'prefix of the url argument. Did you mean to use "wss:"?'
                    % self.__class__.__name__)
            context_factory = self.ssl
        elif isSecure:
            from twisted.internet.ssl import optionsForClientTLS
            context_factory = optionsForClientTLS(host)

        from twisted.internet import reactor
        if self.proxy is not None:
            from twisted.internet.endpoints import TCP4ClientEndpoint
            client = TCP4ClientEndpoint(reactor, self.proxy['host'], self.proxy['port'])
            transport_factory.contextFactory = context_factory
        elif isSecure:
            from twisted.internet.endpoints import SSL4ClientEndpoint
            assert context_factory is not None
            client = SSL4ClientEndpoint(reactor, host, port, context_factory)
        else:
            from twisted.internet.endpoints import TCP4ClientEndpoint
            client = TCP4ClientEndpoint(reactor, host, port)

    # as the reactor shuts down, we wish to wait until we've sent
    # out our "Goodbye" message; leave() returns a Deferred that
    # fires when the transport gets to STATE_CLOSED
    def cleanup(proto):
        if hasattr(proto, '_session') and proto._session is not None:
            if proto._session.is_attached():
                return proto._session.leave()
            elif proto._session.is_connected():
                return proto._session.disconnect()

    # when our proto was created and connected, make sure it's cleaned
    # up properly later on when the reactor shuts down for whatever reason
    def init_proto(proto):
        self._connect_successes += 1
        reactor.addSystemEventTrigger('before', 'shutdown', cleanup, proto)
        return proto

    use_service = False
    if auto_reconnect:
        try:
            # since Twisted 16.1.0
            from twisted.application.internet import ClientService
            from twisted.application.internet import backoffPolicy
            use_service = True
        except ImportError:
            use_service = False

    if use_service:
        # this code path is automatically reconnecting ..
        self.log.debug('using t.a.i.ClientService')

        if (self.max_retries is not None or self.initial_retry_delay is not None
                or self.max_retry_delay is not None
                or self.retry_delay_growth is not None
                or self.retry_delay_jitter is not None):

            if self.max_retry_delay > 0:
                kwargs = {}

                def _jitter():
                    j = 1 if self.retry_delay_jitter is None else self.retry_delay_jitter
                    return random.random() * j

                for key, val in [('initialDelay', self.initial_retry_delay),
                                 ('maxDelay', self.max_retry_delay),
                                 ('factor', self.retry_delay_growth),
                                 ('jitter', _jitter)]:
                    if val is not None:
                        kwargs[key] = val

                # retry policy that will only try to reconnect if we connected
                # successfully at least once before (so it fails on host unreachable etc ..)
                def retry(failed_attempts):
                    if self._connect_successes > 0 and (
                            self.max_retries == -1 or failed_attempts < self.max_retries):
                        return backoffPolicy(**kwargs)(failed_attempts)
                    else:
                        # NOTE(review): leftover debug print on the stop path —
                        # consider removing or routing through self.log.
                        print('hit stop')
                        self.stop()
                        return 100000000000000
            else:
                # immediately reconnect (zero delay)
                def retry(_):
                    return 0
        else:
            retry = backoffPolicy()

        # https://twistedmatrix.com/documents/current/api/twisted.application.internet.ClientService.html
        self._client_service = ClientService(client, transport_factory, retryPolicy=retry)
        self._client_service.startService()

        d = self._client_service.whenConnected()
    else:
        # this code path is only connecting once!
        self.log.debug('using t.i.e.connect()')

        d = client.connect(transport_factory)

    # if we connect successfully, the arg is a WampWebSocketClientProtocol
    d.addCallback(init_proto)

    # if the user didn't ask us to start the reactor, then they
    # get to deal with any connect errors themselves.
    if start_reactor:
        # if an error happens in the connect(), we save the underlying
        # exception so that after the event-loop exits we can re-raise
        # it to the caller.
        class ErrorCollector(object):
            exception = None

            def __call__(self, failure):
                self.exception = failure.value
                reactor.stop()
        connect_error = ErrorCollector()
        d.addErrback(connect_error)

        # now enter the Twisted reactor loop
        reactor.run()

        # if the ApplicationSession sets an "error" key on the self.config.extra dictionary, which
        # has been set to the self.extra dictionary, extract the Exception from that and re-raise
        # it as the very last one (see below) exciting back to the caller of self.run()
        app_error = self.extra.get('error', None)

        # if we exited due to a connection error, raise that to the caller
        if connect_error.exception:
            raise connect_error.exception
        elif app_error:
            raise app_error
    else:
        # let the caller handle any errors
        return d
def run(self, make, start_reactor=True, auto_reconnect=False, log_level='info',
        endpoint=None, reactor=None):
    """
    Run the application component.

    :param make: A factory that produces instances of
       :class:`autobahn.twisted.wamp.ApplicationSession` when called with an
       instance of :class:`autobahn.wamp.types.ComponentConfig`.
    :type make: callable

    :param start_reactor: When ``True`` (the default) this method starts the Twisted
       reactor and doesn't return until the reactor stops. If there are any
       problems starting the reactor or connect()-ing, we stop the reactor and
       raise the exception back to the caller.

    :returns: None is returned, unless you specify ``start_reactor=False`` in
       which case the Deferred that connect() returns is returned; this will
       callback() with an IProtocol instance, which will actually be an instance
       of :class:`WampWebSocketClientProtocol`
    """
    self.log.debug('{klass}.run()', klass=self.__class__.__name__)

    if start_reactor:
        # only select framework, set loop and start logging when we are asked
        # start the reactor - otherwise we are running in a program that likely
        # already tool care of all this.
        from twisted.internet import reactor
        txaio.use_twisted()
        txaio.config.loop = reactor
        txaio.start_logging(level=log_level)

    if callable(make):
        # factory for use ApplicationSession
        def create():
            cfg = ComponentConfig(self.realm, self.extra, runner=self)
            try:
                session = make(cfg)
            except Exception:
                self.log.failure('ApplicationSession could not be instantiated: {log_failure.value}')
                if start_reactor and reactor.running:
                    reactor.stop()
                raise
            else:
                return session
    else:
        create = make

    if self.url.startswith(u'rs'):
        # try to parse RawSocket URL ..
        isSecure, host, port = parse_rs_url(self.url)

        # use the first configured serializer if any (which means, auto-choose "best")
        serializer = self.serializers[0] if self.serializers else None

        # create a WAMP-over-RawSocket transport client factory
        transport_factory = WampRawSocketClientFactory(create, serializer=serializer)
    else:
        # try to parse WebSocket URL ..
        isSecure, host, port, resource, path, params = parse_ws_url(self.url)

        # create a WAMP-over-WebSocket transport client factory
        transport_factory = WampWebSocketClientFactory(create, url=self.url,
                                                       serializers=self.serializers,
                                                       proxy=self.proxy,
                                                       headers=self.headers)

        # client WebSocket settings - similar to:
        # - http://crossbar.io/docs/WebSocket-Compression/#production-settings
        # - http://crossbar.io/docs/WebSocket-Options/#production-settings

        # The permessage-deflate extensions offered to the server ..
        offers = [PerMessageDeflateOffer()]

        # Function to accept permessage_delate responses from the server ..
        def accept(response):
            if isinstance(response, PerMessageDeflateResponse):
                return PerMessageDeflateResponseAccept(response)

        # set WebSocket options for all client connections
        transport_factory.setProtocolOptions(maxFramePayloadSize=1048576,
                                             maxMessagePayloadSize=1048576,
                                             autoFragmentSize=65536,
                                             failByDrop=False,
                                             openHandshakeTimeout=2.5,
                                             closeHandshakeTimeout=1.,
                                             tcpNoDelay=True,
                                             autoPingInterval=10.,
                                             autoPingTimeout=5.,
                                             autoPingSize=4,
                                             perMessageCompressionOffers=offers,
                                             perMessageCompressionAccept=accept)

    # supress pointless log noise
    transport_factory.noisy = False

    if endpoint:
        client = endpoint
    else:
        # if user passed ssl= but isn't using isSecure, we'll never
        # use the ssl argument which makes no sense.
        context_factory = None
        if self.ssl is not None:
            if not isSecure:
                raise RuntimeError(
                    'ssl= argument value passed to %s conflicts with the "ws:" '
                    'prefix of the url argument. Did you mean to use "wss:"?'
                    % self.__class__.__name__)
            context_factory = self.ssl
        elif isSecure:
            from twisted.internet.ssl import optionsForClientTLS
            context_factory = optionsForClientTLS(host)

        from twisted.internet import reactor
        if self.proxy is not None:
            from twisted.internet.endpoints import TCP4ClientEndpoint
            client = TCP4ClientEndpoint(reactor, self.proxy['host'], self.proxy['port'])
            transport_factory.contextFactory = context_factory
        elif isSecure:
            from twisted.internet.endpoints import SSL4ClientEndpoint
            assert context_factory is not None
            client = SSL4ClientEndpoint(reactor, host, port, context_factory)
        else:
            from twisted.internet.endpoints import TCP4ClientEndpoint
            client = TCP4ClientEndpoint(reactor, host, port)

    # as the reactor shuts down, we wish to wait until we've sent
    # out our "Goodbye" message; leave() returns a Deferred that
    # fires when the transport gets to STATE_CLOSED
    def cleanup(proto):
        if hasattr(proto, '_session') and proto._session is not None:
            if proto._session.is_attached():
                return proto._session.leave()
            elif proto._session.is_connected():
                return proto._session.disconnect()

    # when our proto was created and connected, make sure it's cleaned
    # up properly later on when the reactor shuts down for whatever reason
    def init_proto(proto):
        self._connect_successes += 1
        reactor.addSystemEventTrigger('before', 'shutdown', cleanup, proto)
        return proto

    use_service = False
    if auto_reconnect:
        try:
            # since Twisted 16.1.0
            from twisted.application.internet import ClientService
            from twisted.application.internet import backoffPolicy
            use_service = True
        except ImportError:
            use_service = False

    if use_service:
        # this code path is automatically reconnecting ..
        self.log.debug('using t.a.i.ClientService')

        if self.max_retries or self.initial_retry_delay or self.max_retry_delay or self.retry_delay_growth or self.retry_delay_jitter:
            kwargs = {}
            for key, val in [('initialDelay', self.initial_retry_delay),
                             ('maxDelay', self.max_retry_delay),
                             ('factor', self.retry_delay_growth),
                             ('jitter', lambda: random.random() * self.retry_delay_jitter)]:
                if val:
                    kwargs[key] = val

            # retry policy that will only try to reconnect if we connected
            # successfully at least once before (so it fails on host unreachable etc ..)
            def retry(failed_attempts):
                if self._connect_successes > 0 and (self.max_retries == -1 or failed_attempts < self.max_retries):
                    return backoffPolicy(**kwargs)(failed_attempts)
                else:
                    # NOTE(review): leftover debug print on the stop path —
                    # consider removing or routing through self.log.
                    print('hit stop')
                    self.stop()
                    return 100000000000000
        else:
            retry = backoffPolicy()

        self._client_service = ClientService(client, transport_factory, retryPolicy=retry)
        self._client_service.startService()

        d = self._client_service.whenConnected()
    else:
        # this code path is only connecting once!
        self.log.debug('using t.i.e.connect()')

        d = client.connect(transport_factory)

    # if we connect successfully, the arg is a WampWebSocketClientProtocol
    d.addCallback(init_proto)

    # if the user didn't ask us to start the reactor, then they
    # get to deal with any connect errors themselves.
    if start_reactor:
        # if an error happens in the connect(), we save the underlying
        # exception so that after the event-loop exits we can re-raise
        # it to the caller.
        class ErrorCollector(object):
            exception = None

            def __call__(self, failure):
                self.exception = failure.value
                reactor.stop()
        connect_error = ErrorCollector()
        d.addErrback(connect_error)

        # now enter the Twisted reactor loop
        reactor.run()

        # if we exited due to a connection error, raise that to the
        # caller
        if connect_error.exception:
            raise connect_error.exception
    else:
        # let the caller handle any errors
        return d
def __init__(self, endpoint, factory, keepalive, access_key=None, secret_key=None):
    """MQTT client service whose retry delay never exceeds the keepalive interval."""
    self.keepalive = keepalive
    self.protocol = None
    self.access_key = access_key
    self.secret_key = secret_key
    retry_policy = backoffPolicy(maxDelay=keepalive)
    ClientService.__init__(self, endpoint, factory, retryPolicy=retry_policy)
NotCoordinatorForConsumerError, NotLeaderForPartitionError, OffsetsLoadInProgressError, PartitionUnavailableError, RequestTimedOutError, TopicAndPartition, UnknownError, UnknownTopicOrPartitionError, _check_error, ) from .kafkacodec import KafkaCodec, _ReprRequest log = logging.getLogger(__name__) log.addHandler(logging.NullHandler()) _DEFAULT_RETRY_POLICY = backoffPolicy() class KafkaClient(object): """Cluster-aware Kafka client `KafkaClient` maintains a cache of cluster metadata (brokers, topics, etc.) and routes each request to the appropriate broker connection. It must be bootstrapped with the address of at least one Kafka broker to retrieve the cluster metadata. You will typically use this class in combination with `Producer` or `Consumer` which provide higher-level behavior. When done with the client, call :meth:`.close()` to permanently dispose of it. This terminates any open connections and release resources.
def __init__(self, endpoint, factory):
    """Initialise the MQTT service with the default reconnection backoff."""
    retry_policy = backoffPolicy()
    super(MQTTService, self).__init__(endpoint, factory,
                                      retryPolicy=retry_policy)
def policy(attempt):
    """Backoff policy that also stops the reactor once maxRetries is reached."""
    # NOTE(review): after reactor.stop() execution still falls through and
    # returns one more delay; also maxRetries == 0 (falsy) disables the cap
    # entirely — confirm both are intended.
    if maxRetries and attempt >= maxRetries:
        reactor.stop()
    return backoffPolicy()(attempt)
def connect(self, auto_reconnect=True):
    """Connect the WAMP-over-WebSocket transport.

    Returns self.ready, which errbacks if the connection attempt fails.
    """
    def init(proto):
        # Make sure the session says goodbye cleanly at reactor shutdown.
        reactor.addSystemEventTrigger('before', 'shutdown', cleanup, proto)
        return proto

    def cleanup(proto):
        session = getattr(proto, '_session', None)
        if session is None:
            return
        if session.is_attached():
            return session.leave()
        elif session.is_connected():
            return session.disconnect()

    from twisted.internet import reactor

    transport_factory = WampWebSocketClientFactory(self, str(self.address))
    transport_factory.setProtocolOptions(
        maxFramePayloadSize=1048576,
        maxMessagePayloadSize=1048576,
        autoFragmentSize=65536,
        failByDrop=False,
        openHandshakeTimeout=OPEN_HANDSHAKE_TIMEOUT,
        closeHandshakeTimeout=CLOSE_HANDSHAKE_TIMEOUT,
        tcpNoDelay=True,
        autoPingInterval=AUTO_PING_INTERVAL,
        autoPingTimeout=AUTO_PING_TIMEOUT,
        autoPingSize=4,
    )

    if self.address.ssl:
        if self._cert_manager:
            # Pin trust to the certificate supplied by the cert manager.
            cert_data = self._cert_manager.read_certificate()
            authority = twisted_ssl.Certificate.loadPEM(cert_data)
        else:
            # No pinned certificate available.
            authority = None
        context_factory = optionsForClientTLS(X509_COMMON_NAME,
                                              trustRoot=authority)
        self._client = SSL4ClientEndpoint(reactor, self.address.host,
                                          self.address.port, context_factory)
    else:
        if self._use_ipv6:
            endpoint_cls = TCP6ClientEndpoint
        else:
            endpoint_cls = TCP4ClientEndpoint
        self._client = endpoint_cls(reactor, self.address.host,
                                    self.address.port)

    if auto_reconnect:
        # Let ClientService own the connection and retry with backoff.
        self._reconnect_service = ClientService(
            endpoint=self._client,
            factory=transport_factory,
            retryPolicy=backoffPolicy(factor=BACKOFF_POLICY_FACTOR))
        self._reconnect_service.startService()
        deferred = self._reconnect_service.whenConnected()
    else:
        deferred = self._client.connect(transport_factory)

    deferred.addCallback(init)
    deferred.addErrback(self.ready.errback)
    return self.ready
def __init(self, endpoint, factory):
    # NOTE(review): '__init' looks like a typo for '__init__'; as written it
    # is never invoked as the constructor — confirm against callers.
    ClientService.__init__(self, endpoint, factory, retryPolicy=backoffPolicy())
from .common import ( BrokerMetadata, BrokerResponseError, CancelledError, ClientError, ConsumerCoordinatorNotAvailableError, DefaultKafkaPort, FailedPayloadsError, KafkaError, KafkaUnavailableError, LeaderUnavailableError, NotCoordinatorForConsumerError, NotLeaderForPartitionError, OffsetsLoadInProgressError, PartitionUnavailableError, RequestTimedOutError, TopicAndPartition, UnknownError, UnknownTopicOrPartitionError, _check_error, ) from .kafkacodec import KafkaCodec log = logging.getLogger(__name__) log.addHandler(logging.NullHandler()) _DEFAULT_RETRY_POLICY = backoffPolicy() class KafkaClient(object): """Cluster-aware Kafka client `KafkaClient` maintains a cache of cluster metadata (brokers, topics, etc.) and routes each request to the appropriate broker connection. It must be bootstrapped with the address of at least one Kafka broker to retrieve the cluster metadata. You will typically use this class in combination with `Producer` or `Consumer` which provide higher-level behavior. When done with the client, call :meth:`.close()` to permanently dispose of it. This terminates any open connections and release resources.