def test_logging(self):
    """Each retriable increment() call must emit the expected WARNING log record."""
    response = HTTPResponse(
        body='{"code":"too many requests","message":"org 04014de4ed590000 has exceeded limited_write plan limit"}'
    )
    response.headers.add('Retry-After', '63')

    with self.assertLogs('influxdb_client.client.write.retry', level='WARNING') as cm:
        retry = WritesRetry(total=5, backoff_factor=1, max_retry_delay=15)
        retry = retry.increment(response=response)
        retry = retry.increment(error=Exception("too many requests"))
        retry.increment(url='http://localhost:9999')

    # One expected record per increment(), in call order.
    expected_records = [
        "WARNING:influxdb_client.client.write.retry:The retriable error occurred during request. "
        "Reason: 'org 04014de4ed590000 has exceeded limited_write plan limit'. Retry in 63s.",
        "WARNING:influxdb_client.client.write.retry:The retriable error occurred during request. "
        "Reason: 'too many requests'.",
        "WARNING:influxdb_client.client.write.retry:The retriable error occurred during request. "
        "Reason: 'Failed request to: http://localhost:9999'.",
    ]
    for index, record in enumerate(expected_records):
        self.assertEqual(record, cm.output[index])
def test_get_retry_after_jitter(self):
    """A Retry-After header value is extended by a jitter in (0, jitter_interval]."""
    response = HTTPResponse()
    response.headers.add('Retry-After', '5')

    delay = WritesRetry(jitter_interval=2).get_retry_after(response)
    self.assertGreater(delay, 5)
    self.assertLessEqual(delay, 7)
def test_backoff_max(self):
    """After repeated increments the backoff stays within max_retry_delay."""
    retry = WritesRetry(total=5, retry_interval=1, max_retry_delay=15)
    for _ in range(5):
        retry = retry.increment()
    self.assertLessEqual(retry.get_backoff_time(), 15)
def test_backoff_increment(self):
    """One increment consumes one attempt and yields a backoff in (interval, 2*interval]."""
    retry = WritesRetry(total=5, retry_interval=4).increment()

    self.assertEqual(4, retry.total)
    self.assertEqual(False, retry.is_exhausted())

    delay = retry.get_backoff_time()
    self.assertGreater(delay, 4)
    self.assertLessEqual(delay, 8)
def test_backoff_max(self):
    """With backoff_factor=1 five increments drive the delay exactly to max_retry_delay."""
    retry = WritesRetry(total=5, backoff_factor=1, max_retry_delay=15)
    for _ in range(5):
        retry = retry.increment()
    self.assertEqual(15, retry.get_backoff_time())
def test_backoff_jitter(self):
    """Jitter widens the first backoff to (backoff_factor, backoff_factor + jitter_interval]."""
    retry = WritesRetry(total=5, backoff_factor=4, jitter_interval=2).increment()

    self.assertEqual(4, retry.total)
    self.assertEqual(False, retry.is_exhausted())

    delay = retry.get_backoff_time()
    self.assertGreater(delay, 4)
    self.assertLessEqual(delay, 6)
def test_backoff_exponential_base(self):
    """Backoff doubles per increment: factor * base**(attempt - 1) with base=2."""
    retry = WritesRetry(total=5, backoff_factor=2, exponential_base=2)
    for expected_delay in (2, 4, 8, 16):
        retry = retry.increment()
        self.assertEqual(expected_delay, retry.get_backoff_time())
def test_copy(self):
    """increment() returns a copy that preserves the custom settings, decrementing only total."""
    retry = WritesRetry(exponential_base=3, max_retry_delay=145, total=10)
    for expected_total in (10, 9, 8):
        self.assertEqual(145, retry.max_retry_delay)
        self.assertEqual(3, retry.exponential_base)
        self.assertEqual(expected_total, retry.total)
        if expected_total > 8:  # the original sequence stops after two increments
            retry = retry.increment()
def connectInfluxDatabase(self):
    """Connect to InfluxDB and prepare the client APIs on this instance.

    Selects the client by ``self.influx_version``: version 1 uses the legacy
    pandas-aware ``DataFrameClient``; version 2 creates an ``InfluxDBClient``
    with a retry strategy plus query and write APIs.  Any failure is logged
    and terminates the process with exit code 99.
    """
    try:
        # prepare database
        self.logger.debug(
            f'Connecting to Influx with: Host:{self.influx_host}, Port: {self.influx_port}, User: {self.influx_user}, DB: {self.influx_db}'
        )
        if (self.influx_version == 1):
            # NOTE: the original code had a dead `pass` statement here; removed.
            self.influxClient = DataFrameClient(self.influx_host, self.influx_port,
                                                self.influx_user, self.influx_pwd,
                                                self.influx_db)
        elif (self.influx_version == 2):
            # exponential_base=1 keeps the delay constant across all 20 retries.
            retries = WritesRetry(total=20, backoff_factor=1, exponential_base=1)
            self.influxClient = InfluxDBClient(
                url=f"http://{self.influx_host}:{self.influx_port}",
                token=self.influx_token,
                org=self.influx_org,
                retries=retries,
                timeout=180_000)
            self.influx_query_api = self.influxClient.query_api()
            # Synchronous writes: the caller sees the result of every write.
            self.influx_write_api = self.influxClient.write_api(
                write_options=WriteOptions(
                    batch_size=500,
                    write_type=WriteType.synchronous,
                    flush_interval=10_000,
                    jitter_interval=2_000,
                    retry_interval=30_000,
                    max_retries=25,
                    max_retry_delay=60_000,
                    exponential_base=2))
    except Exception as e:
        # Top-level boundary: log with traceback, then abort the process.
        self.logger.exception('Crash!', exc_info=e)
        sys.exit(99)
def test_backoff(self):
    """Backoff follows factor * 5**(errors - 1), capped at max_retry_delay; exhausting raises."""
    retry = WritesRetry(total=5, backoff_factor=1, max_retry_delay=550)
    self.assertEqual(5, retry.total)
    self.assertEqual(False, retry.is_exhausted())
    self.assertEqual(0, retry.get_backoff_time())

    # (remaining total, expected backoff) after each successive increment.
    for expected_total, expected_delay in [(4, 1), (3, 5), (2, 25), (1, 125), (0, 550)]:
        retry = retry.increment()
        self.assertEqual(expected_total, retry.total)
        self.assertEqual(False, retry.is_exhausted())
        self.assertEqual(expected_delay, retry.get_backoff_time())

    # One increment beyond the budget must fail.
    with self.assertRaises(MaxRetryError) as cm:
        retry.increment()
    self.assertEqual("too many error responses", cm.exception.reason.args[0])
def test_is_retry_430(self):
    """Status 430 counts as retriable for an allowed method."""
    retry = WritesRetry(allowed_methods=["POST"])
    retryable = retry.is_retry("POST", 430, True)
    self.assertTrue(retryable)
def test_is_retry_428(self):
    """Status 428 is not retriable even for an allowed method."""
    retry = WritesRetry(allowed_methods=["POST"])
    retryable = retry.is_retry("POST", 428, True)
    self.assertFalse(retryable)
def test_is_retry_430(self):
    """Status 430 counts as retriable for a whitelisted method (legacy parameter name)."""
    retry = WritesRetry(method_whitelist=["POST"])
    retryable = retry.is_retry("POST", 430, True)
    self.assertTrue(retryable)
def test_get_retry_after(self):
    """Without jitter, get_retry_after returns the Retry-After header value as-is."""
    response = HTTPResponse()
    response.headers.add('Retry-After', '5')

    self.assertEqual(5, WritesRetry().get_retry_after(response))
def test_is_retry_retry_after_header_is_not_required(self):
    """A 429 is retriable even when no Retry-After header is present (legacy parameter name)."""
    retry = WritesRetry(method_whitelist=["POST"])
    retryable = retry.is_retry("POST", 429, False)
    self.assertTrue(retryable)
def test_is_retry_respect_method(self):
    """A method outside the whitelist is never retried (legacy parameter name)."""
    retry = WritesRetry(method_whitelist=["POST"])
    retryable = retry.is_retry("GET", 429, False)
    self.assertFalse(retryable)
# # Main # run: bulk-load.py <hostname>, where hostname - node to process bulkstats fList = sys.argv[1] + "*.csv" print('looking for files:', fList) workingSchemas = readWorkingSchemas(bulkList) di = bulkDocDict(bulkDocFile, bulkDR) bulkDict = {} bulkDict.update(bulkCfgDict(bulkCfgFileMME)) bulkDict.update(bulkCfgDict(bulkCfgFileSAE)) retries = WritesRetry(total=3, retry_interval=1, exponential_base=2) client = InfluxDBClient(url=url, token=token, org=org, retries=retries, enable_gzip=True) write_api = client.write_api(write_options=SYNCHRONOUS) os.chdir(bulkDir) blist = [] for bfile in sorted(glob.glob(fList)): blist += [bfile] for fil in blist: if workOnFile(fil) is True: continue else:
def test_is_retry_428(self):
    """Status 428 is not retriable even for a whitelisted method (legacy parameter name)."""
    retry = WritesRetry(method_whitelist=["POST"])
    retryable = retry.is_retry("POST", 428, True)
    self.assertFalse(retryable)
def test_is_retry_retry_after_header_is_not_required(self):
    """A 429 is retriable even when no Retry-After header is present."""
    retry = WritesRetry(allowed_methods=["POST"])
    retryable = retry.is_retry("POST", 429, False)
    self.assertTrue(retryable)
def test_is_retry_respect_method(self):
    """A method outside allowed_methods is never retried."""
    retry = WritesRetry(allowed_methods=["POST"])
    retryable = retry.is_retry("GET", 429, False)
    self.assertFalse(retryable)
""" for row in DictReader(open(csv_file_path, 'r')): point = Point('financial-analysis') \ .tag('type', 'vix-daily') \ .field('open', float(row['VIX Open'])) \ .field('high', float(row['VIX High'])) \ .field('low', float(row['VIX Low'])) \ .field('close', float(row['VIX Close'])) \ .time(row['Date']) yield point """ Define Retry strategy - 3 attempts => 2, 4, 8 """ retries = WritesRetry(total=3, backoff_factor=1, exponential_base=2) client = InfluxDBClient(url='http://localhost:8086', token='my-token', org='my-org', retries=retries) """ Use synchronous version of WriteApi to strongly depends on result of write """ write_api = client.write_api(write_options=SYNCHRONOUS) """ Prepare batches from generator """ batches = rx \ .from_iterable(csv_to_generator('vix-daily.csv')) \ .pipe(ops.buffer_with_count(500))