def test_retry_policy(self):
    """A retry_policy entry in the config string ends up parsed into statuses."""
    factory = HttpClientFactory(None, None, {})
    factory.update_upstream(
        'nn', 'retry_policy=http_503,non_idempotent_503|server=172.17.0.1:2800')
    parsed_upstream = factory.upstreams.get('nn')
    # 503 is retried, including non-idempotent requests (the True flag).
    self.assertEqual({503: True}, parsed_upstream.retry_policy.statuses)
def setUp(self):
    """Build a curl-backed balancing HTTP client for each test.

    Configures tornado to use the curl client implementation, then creates a
    factory bound to the test app's http client and a balancing client from it.
    """
    # NOTE(review): configure() is called before super().setUp(), which
    # presumably instantiates the AsyncHTTPClient — keep this order.
    AsyncHTTPClient.configure(
        'tornado.curl_httpclient.CurlAsyncHTTPClient')
    super().setUp()
    self.http_client_factory = HttpClientFactory('testapp', self.http_client)
    self.balancing_client = self.http_client_factory.get_http_client()
    # Tests below rely on 'test' being the local datacenter.
    tornado.options.options.datacenter = 'test'
def test_init_from_config(self):
    """Upstream settings supplied as a config dict are applied at construction."""
    upstream_configs = {
        'nn': {
            'config': {
                'max_tries': 10,
                'max_fails': 100,
                'request_timeout_sec': 0.1,
                'connect_timeout_sec': 1.3,
                'max_timeout_tries': 4,
            },
            'servers': [{'server': '172.17.0.1:2800'}],
        }
    }
    factory = HttpClientFactory(None, None, upstream_configs)
    upstream = factory.upstreams.get('nn')

    self.assertEqual(10, upstream.max_tries)
    self.assertEqual(100, upstream.max_fails)
    self.assertEqual(10, upstream.fail_timeout)
    self.assertEqual(1.3, upstream.connect_timeout)
    self.assertEqual(0.1, upstream.request_timeout)
    self.assertEqual(4, upstream.max_timeout_tries)

    servers = upstream.servers
    self.assertEqual(1, len(servers))
    self.assertEqual('172.17.0.1:2800', servers[0].address)
async def init(self):
    """Finish application start-up: integrations, kafka metrics, HTTP client factory."""
    self.transforms.insert(0, partial(DebugTransform, self))
    self.available_integrations, integration_futures = integrations.load_integrations(
        self)
    # Wait for every integration that returned a future to finish initializing.
    await asyncio.gather(
        *[future for future in integration_futures if future])
    kafka_cluster = options.http_client_metrics_kafka_cluster
    # Metrics are sent only when the configured cluster is actually declared
    # in the kafka_clusters option.
    send_metrics_to_kafka = kafka_cluster and kafka_cluster in options.kafka_clusters
    if kafka_cluster and kafka_cluster not in options.kafka_clusters:
        # Misconfiguration: a metrics cluster was named but is unknown.
        app_logger.warning(
            'kafka cluster for http client metrics "%s" is not present in "kafka_clusters" option, '
            'metrics will be disabled', kafka_cluster)
    else:
        app_logger.info('kafka metrics are %s',
                        'enabled' if send_metrics_to_kafka else 'disabled')
    kafka_producer = self.get_kafka_producer(
        kafka_cluster) if send_metrics_to_kafka else None
    # http_upstreams may be absent from the config; default to no upstreams.
    self.http_client_factory = HttpClientFactory(
        self.app, self.tornado_http_client,
        getattr(self.config, 'http_upstreams', {}),
        statsd_client=self.statsd_client, kafka_producer=kafka_producer)
async def init(self):
    """Finish application start-up: service discovery, curl HTTP client,
    integrations, kafka metrics and the balancing HTTP client factory."""
    self.service_discovery_client = get_async_service_discovery(options)
    self.transforms.insert(0, partial(DebugTransform, self))
    # Switch tornado to the curl implementation before constructing the client.
    AsyncHTTPClient.configure('tornado.curl_httpclient.CurlAsyncHTTPClient',
                              max_clients=options.max_http_clients)
    self.tornado_http_client = AsyncHTTPClient()
    if options.max_http_clients_connects is not None:
        # CURLMOPT_MAXCONNECTS — caps libcurl's connection cache size.
        self.tornado_http_client._multi.setopt(pycurl.M_MAXCONNECTS,
                                               options.max_http_clients_connects)
    self.available_integrations, integration_futures = integrations.load_integrations(self)
    # Wait for every integration that returned a future to finish initializing.
    await asyncio.gather(*[future for future in integration_futures if future])
    kafka_cluster = options.http_client_metrics_kafka_cluster
    # Metrics are sent only when the configured cluster is actually declared
    # in the kafka_clusters option.
    send_metrics_to_kafka = kafka_cluster and kafka_cluster in options.kafka_clusters
    if kafka_cluster and kafka_cluster not in options.kafka_clusters:
        # Misconfiguration: a metrics cluster was named but is unknown.
        app_logger.warning(
            'kafka cluster for http client metrics "%s" is not present in "kafka_clusters" option, '
            'metrics will be disabled', kafka_cluster
        )
    else:
        app_logger.info('kafka metrics are %s', 'enabled' if send_metrics_to_kafka else 'disabled')
    kafka_producer = self.get_kafka_producer(kafka_cluster) if send_metrics_to_kafka else None
    # http_upstreams may be absent from the config; default to no upstreams.
    self.http_client_factory = HttpClientFactory(self.app, self.tornado_http_client,
                                                 getattr(self.config, 'http_upstreams', {}),
                                                 statsd_client=self.statsd_client,
                                                 kafka_producer=kafka_producer)
def test_from_config_string(self):
    """Every upstream setting can be parsed from a single config string."""
    factory = HttpClientFactory(None, None, {})
    factory.update_upstream(
        'nn',
        'max_tries=10 fail_timeout_sec=1 max_fails=30 request_timeout_sec=0.2 '
        'connect_timeout_sec=1 max_timeout_tries=2 |'
        'server=172.17.0.1:2800')
    upstream = factory.upstreams.get('nn')

    # (expected, actual) pairs for the scalar settings.
    for expected, actual in [
        (10, upstream.max_tries),
        (30, upstream.max_fails),
        (1, upstream.fail_timeout),
        (1, upstream.connect_timeout),
        (0.2, upstream.request_timeout),
        (2, upstream.max_timeout_tries),
    ]:
        self.assertEqual(expected, actual)

    self.assertEqual(1, len(upstream.servers))
    self.assertEqual('172.17.0.1:2800', upstream.servers[0].address)
def test_empty_server_list_update(self):
    """Updating an existing upstream with an empty config removes it."""
    factory = HttpClientFactory(None, None, {})
    factory.update_upstream('nn', 'max_tries=10 | server=172.17.0.1:2800')
    self.assertEqual(1, len(factory.upstreams))
    # An update carrying no servers drops the upstream entirely.
    factory.update_upstream('nn', '|')
    self.assertEqual(0, len(factory.upstreams))
class BalancingClientMixin:
    """Test mixin wiring a balancing HTTP client into a tornado test case."""

    def setUp(self):
        # Select the curl client implementation before the base class
        # instantiates the http client.
        AsyncHTTPClient.configure(
            'tornado.curl_httpclient.CurlAsyncHTTPClient')
        super().setUp()
        self.http_client_factory = HttpClientFactory('testapp', self.http_client)
        self.balancing_client = self.http_client_factory.get_http_client()
        tornado.options.options.datacenter = 'test'

    def get_upstream_config(self):
        """Base upstream config; subclasses may override to tweak timeouts."""
        return {'request_timeout_sec': 0.5}

    def register_ports_for_upstream(self, *ports):
        """Register a 'test' upstream made of local servers on the given ports."""
        local_servers = [Server(f'127.0.0.1:{port}', dc='test') for port in ports]
        upstream = Upstream('test', self.get_upstream_config(), local_servers)
        self.http_client_factory.upstreams[upstream.name] = upstream
def test_empty_server_list_init(self):
    """Registering an upstream with no servers keeps the registry empty."""
    factory = HttpClientFactory(None, None, {})
    factory.update_upstream('nn', '|')
    self.assertEqual(0, len(factory.upstreams))