def retry_wrapper(*args, **kwargs):
    sleep_generator = core_retry.exponential_sleep_generator(
        _DEFAULT_INITIAL_DELAY,
        _DEFAULT_MAXIMUM_DELAY,
        _DEFAULT_DELAY_MULTIPLIER,
    )

    for sleep_time in itertools.islice(sleep_generator, retries + 1):
        try:
            result = callback(*args, **kwargs)
            if isinstance(result, tasklets.Future):
                result = yield result
        except Exception as e:
            # `e` is removed from locals at end of block
            error = e  # See: https://goo.gl/5J8BMK
            if not is_transient_error(error):
                raise error
        else:
            raise tasklets.Return(result)

        yield tasklets.sleep(sleep_time)

    raise core_exceptions.RetryError(
        "Maximum number of {} retries exceeded while calling {}".format(
            retries, callback
        ),
        cause=error,
    )
def retry_wrapper(*args, **kwargs):
    from google.cloud.ndb import context as context_module

    sleep_generator = core_retry.exponential_sleep_generator(
        _DEFAULT_INITIAL_DELAY,
        _DEFAULT_MAXIMUM_DELAY,
        _DEFAULT_DELAY_MULTIPLIER,
    )

    for sleep_time in itertools.islice(sleep_generator, retries + 1):
        context = context_module.get_context()
        if not context.in_retry():
            # We need to be able to identify if we are inside a nested
            # retry. Here, we set the retry state in the context. This is
            # used for deciding if an exception should be raised
            # immediately or passed up to the outer retry block.
            context.set_retry_state(repr(callback))
        try:
            result = callback(*args, **kwargs)
            if isinstance(result, tasklets.Future):
                result = yield result
        except exceptions.NestedRetryException as e:
            error = e
        except Exception as e:
            # `e` is removed from locals at end of block
            error = e  # See: https://goo.gl/5J8BMK
            if not is_transient_error(error):
                # If we are in an inner retry block, use special nested
                # retry exception to bubble up to outer retry. Else, raise
                # actual exception.
                if context.get_retry_state() != repr(callback):
                    message = getattr(error, "message", str(error))
                    raise exceptions.NestedRetryException(message)
                else:
                    raise error
        else:
            raise tasklets.Return(result)
        finally:
            # No matter what, if we are exiting the top level retry,
            # clear the retry state in the context.
            if context.get_retry_state() == repr(callback):  # pragma: NO BRANCH
                context.clear_retry_state()

        yield tasklets.sleep(sleep_time)

    raise core_exceptions.RetryError(
        "Maximum number of {} retries exceeded while calling {}".format(
            retries, callback
        ),
        cause=error,
    )
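# Both retry_wrapper variants above are closures: `callback`, `retries`,
# `is_transient_error`, and the _DEFAULT_* constants come from an enclosing
# factory that is not shown here. Below is a minimal stand-alone sketch of
# the same control flow as a plain synchronous function; the `retry` name,
# its parameters, and the default delay values are illustrative assumptions,
# not the library's actual API.
import itertools
import time

from google.api_core import exceptions as core_exceptions
from google.api_core import retry as core_retry


def retry(callback, retries, is_transient_error,
          initial=0.1, maximum=60.0, multiplier=2.0):
    # Yields delays initial, initial*multiplier, ... capped at maximum.
    sleep_generator = core_retry.exponential_sleep_generator(
        initial, maximum, multiplier
    )
    for sleep_time in itertools.islice(sleep_generator, retries + 1):
        try:
            return callback()
        except Exception as e:
            # As the comments above note, `e` is unbound once the except
            # block exits, so keep a reference for the final RetryError.
            error = e
            if not is_transient_error(error):
                raise
        time.sleep(sleep_time)
    raise core_exceptions.RetryError(
        "Maximum number of {} retries exceeded while calling {}".format(
            retries, callback
        ),
        cause=error,
    )


# Example use (hypothetical predicate):
#     retry(fetch_entity, retries=3,
#           is_transient_error=lambda e: isinstance(
#               e, core_exceptions.ServiceUnavailable))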
def some_tasklet():
    # This tasklet runs in the main loop. In order to get results back
    # from the transaction_async calls, the run_inner_loop idle handler
    # will have to be run.
    yield [
        _transaction.transaction_async(callback),
        _transaction.transaction_async(callback),
    ]

    # Scheduling this sleep call forces the run_inner_loop idle handler
    # to be run again so we can run it in the case when there is no
    # more work to be done in the transaction. (Branch coverage.)
    yield tasklets.sleep(0)

    raise tasklets.Return("I tried, momma.")
def retry_wrapper(*args, **kwargs):
    sleep_generator = core_retry.exponential_sleep_generator(0.1, 1)
    attempts = 5
    for sleep_time in sleep_generator:  # pragma: NO BRANCH
        # pragma is required because loop never exits normally, it only gets
        # raised out of.
        attempts -= 1
        try:
            result = yield wrapped(*args, **kwargs)
            raise tasklets.Return(result)
        except transient_errors:
            if not attempts:
                raise
            yield tasklets.sleep(sleep_time)
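# This wrapper also relies on closure variables: `wrapped` (the tasklet being
# retried) and `transient_errors` (a tuple of exception types that warrant
# another attempt). A minimal sketch of an enclosing decorator that could
# produce it, assuming google.cloud.ndb's tasklet machinery; the `retry_async`
# name and the default error tuple are assumptions for illustration.
import functools

from google.api_core import exceptions as core_exceptions
from google.api_core import retry as core_retry
from google.cloud.ndb import tasklets


def retry_async(wrapped, transient_errors=(core_exceptions.ServiceUnavailable,)):
    @tasklets.tasklet
    @functools.wraps(wrapped)
    def retry_wrapper(*args, **kwargs):
        sleep_generator = core_retry.exponential_sleep_generator(0.1, 1)
        attempts = 5
        for sleep_time in sleep_generator:
            attempts -= 1
            try:
                # A successful attempt exits the loop via tasklets.Return,
                # which the tasklet machinery turns into the future's result.
                result = yield wrapped(*args, **kwargs)
                raise tasklets.Return(result)
            except transient_errors:
                if not attempts:
                    raise
                yield tasklets.sleep(sleep_time)

    return retry_wrapper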
def test_sleep():
    with pytest.raises(NotImplementedError):
        tasklets.sleep()
def callback():
    # Scheduling the sleep call here causes control to go back up to
    # the main loop before this tasklet, running in the transaction
    # loop, has finished, forcing a call to run_inner_loop via the idle
    # handler.
    yield tasklets.sleep(0)
def test_sleep(time_module, context):
    time_module.time.side_effect = [0, 0, 1]
    future = tasklets.sleep(1)
    assert future.get_result() is None
    time_module.sleep.assert_called_once_with(1)
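# The first test_sleep evidently targets an early stub in which sleep simply
# raised NotImplementedError; the second drives a working implementation
# through a mocked time module (time.time is polled for due calls, time.sleep
# performs the wait). A minimal sketch of such an implementation, assuming an
# event loop exposing queue_call(delay, callback, *args) in the shape of
# google.cloud.ndb's _eventloop:
from google.cloud.ndb import _eventloop
from google.cloud.ndb import tasklets


def sleep(seconds):
    # Return a future that the event loop resolves to None after `seconds`;
    # the loop's time.time()/time.sleep() calls are what the test mocks.
    future = tasklets.Future("sleep({})".format(seconds))
    _eventloop.queue_call(seconds, future.set_result, None)
    return future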