def test_retry_if_not_result(self):
    retry = tenacity.retry_if_not_result(lambda x: x == 1)

    def r(fut):
        retry_state = make_retry_state(1, 1.0, last_result=fut)
        return retry(retry_state)

    # Future.construct(attempt_number, value, has_exception): a result of 2
    # fails the predicate, so a retry is requested; a result of 1 passes it.
    self.assertTrue(r(tenacity.Future.construct(1, 2, False)))
    self.assertFalse(r(tenacity.Future.construct(1, 1, False)))
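make_retry_state above is a helper from tenacity's own test suite. A minimal stand-in (an assumption for illustration, not tenacity's actual helper) is enough here, because retry_if_not_result only inspects the state's outcome:

import tenacity

def make_retry_state(attempt_number, seconds_since_start, last_result=None):
    # Hypothetical stand-in: build a bare RetryCallState and attach the
    # outcome. seconds_since_start is ignored because only wait/stop
    # predicates care about timing.
    retry_state = tenacity.RetryCallState(None, None, (), {})
    retry_state.attempt_number = attempt_number
    retry_state.outcome = last_result
    return retry_state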
def wait_for_operation(operation_request,
                       test_success_fn,
                       timeout_sec=_WAIT_FOR_OPERATION_SEC,
                       wait_sec=_WAIT_FIXED_SEC):
    # Retry while the response does not satisfy test_success_fn, and on any
    # exception, polling at a fixed interval until the deadline. reraise=True
    # re-raises the final exception (if any) rather than wrapping it in a
    # RetryError.
    retryer = tenacity.Retrying(
        retry=(tenacity.retry_if_not_result(test_success_fn)
               | tenacity.retry_if_exception_type()),
        wait=tenacity.wait_fixed(wait_sec),
        stop=tenacity.stop_after_delay(timeout_sec),
        after=tenacity.after_log(logger, logging.DEBUG),
        reraise=True)
    return retryer(operation_request.execute)
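A self-contained sketch of how this helper might be driven. The module-level names (_WAIT_FOR_OPERATION_SEC, _WAIT_FIXED_SEC, logger) and the FakeOperationRequest class are assumptions for illustration; in the source module those names would be defined before wait_for_operation, since default argument values are evaluated at definition time.

import logging

import tenacity

logger = logging.getLogger(__name__)
_WAIT_FOR_OPERATION_SEC = 30
_WAIT_FIXED_SEC = 1

class FakeOperationRequest:
    """Pretends to be a long-running operation that finishes on the third poll."""

    def __init__(self):
        self.polls = 0

    def execute(self):
        self.polls += 1
        return {'status': 'DONE' if self.polls >= 3 else 'RUNNING'}

op = FakeOperationRequest()
result = wait_for_operation(op, test_success_fn=lambda resp: resp['status'] == 'DONE')
assert result == {'status': 'DONE'}
assert op.polls == 3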
        # (tail of the preceding class's __init__ in the source file)
        self.s3_client_provider = s3_client_provider
        self.progress = progress
        self.done = done
        self.run = run


def _copy_file_list_last_retry(retry_state):
    # On the final attempt, re-run the wrapped function with no ignored
    # exception types so the real error propagates to the caller.
    return retry_state.fn(
        *retry_state.args,
        **{**retry_state.kwargs, 'exceptions_to_ignore': ()},
    )


@retry(stop=stop_after_attempt(MAX_COPY_FILE_LIST_RETRIES - 1),
       wait=wait_exponential(multiplier=1, min=1, max=10),
       retry=retry_if_not_result(all),
       retry_error_callback=_copy_file_list_last_retry)
def _copy_file_list_internal(file_list, results, message, callback,
                             exceptions_to_ignore=(ClientError,)):
    """
    Takes a list of tuples (src, dest, size) and copies the data in parallel.
    `results` is the list where results will be stored.
    Returns versioned URLs for S3 destinations and regular file URLs for files.
    """
    if not file_list:
        return []
    assert len(file_list) == len(results)

    # Only count entries that have not already succeeded on a previous attempt.
    total_size = sum(size for (_, _, size), result in zip(file_list, results)
                     if result is None)

    lock = Lock()
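The pattern above retries while any entry in the returned results list is still falsy, then on the last allowed attempt re-runs the function with exception suppression turned off so the real error surfaces. A minimal sketch of the same idea, with invented names (copy_batch, suppress_errors, _last_retry) and assuming nothing about the surrounding codebase:

from tenacity import retry, retry_if_not_result, stop_after_attempt

def _last_retry(retry_state):
    # Re-invoke the raw (undecorated) function one final time without
    # suppression, so the underlying exception propagates.
    return retry_state.fn(*retry_state.args,
                          **{**retry_state.kwargs, 'suppress_errors': False})

@retry(stop=stop_after_attempt(2),
       retry=retry_if_not_result(all),
       retry_error_callback=_last_retry)
def copy_batch(items, suppress_errors=True):
    if suppress_errors:
        # Pretend every copy quietly fails, recording a falsy result.
        return [False for _ in items]
    # Final attempt: raise instead of recording a failure.
    raise RuntimeError('copy failed: %r' % (items,))

try:
    copy_batch(['a', 'b'])
except RuntimeError as exc:
    print('surfaced on the last try:', exc)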
def test_retry_if_not_result(self):
    r = tenacity.retry_if_not_result(lambda x: x == 1)
    self.assertTrue(r(tenacity.Future.construct(1, 2, False)))
    self.assertFalse(r(tenacity.Future.construct(1, 1, False)))
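This older variant calls the predicate object with the Future directly; in newer tenacity releases (5.0+) the predicates receive a RetryCallState instead, as in the first test above. Both tests exercise the predicate in isolation; an end-to-end sketch of the same predicate driving a Retrying loop looks like this (flaky_read and its counter are invented for illustration):

import tenacity

attempts = {'n': 0}

def flaky_read():
    # Returns 0 on the first two calls, then 1.
    attempts['n'] += 1
    return 1 if attempts['n'] >= 3 else 0

retryer = tenacity.Retrying(
    retry=tenacity.retry_if_not_result(lambda x: x == 1),
    stop=tenacity.stop_after_attempt(5),
)
assert retryer(flaky_read) == 1
assert attempts['n'] == 3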