def _update_worker(self, queue_config, _):
  """ Bridges queue-configuration watch events to the main IO loop.

  This runs on a ZooKeeper watcher thread, so it mutates no state
  directly; the real handling is scheduled onto the main IOLoop.

  Args:
    queue_config: A JSON string specifying queue configuration.
  """
  # A None payload may mean the project is gone; stop watching if so.
  if queue_config is None:
    project_node = '/appscale/projects/{}'.format(self.project_id)
    try:
      project_exists = self.zk_client.exists(project_node) is not None
    except ZookeeperError:
      # If the project has been deleted, an extra "exists" watch will remain.
      project_exists = True

    if not project_exists:
      self._stopped = True
      return False

  retrying_update = utils.retry_data_watch_coroutine(
    self.queues_node, self.update_worker)
  IOLoop.instance().add_callback(retrying_update, queue_config)
def test_concurrency_between_different_nodes(self):
  """ Wrappers bound to distinct nodes must not serialize each other. """
  shared_data = []

  @gen.coroutine
  def worker(call_arg):
    for _ in xrange(20):
      yield gen.sleep(0.001)
      shared_data.append(call_arg)

  nodes_and_args = [
    ("node-1", 1), ("node-2", 2), ("node-3", 3), ("node-4", 4)]
  yield [utils.retry_data_watch_coroutine(node, worker)(arg)
         for node, arg in nodes_and_args]

  # Different nodes run concurrently, so interleaving is expected.
  self.assertNotEqual(shared_data, [1]*20 + [2]*20 + [3]*20 + [4]*20)
def test_exception_filter(self, wait_mock):
  """ Only exceptions matching the filter should trigger a retry. """
  wait_mock.side_effect = testing.gen.coroutine(lambda sec: False)

  def flaky(exc_class, msg, retries_to_success):
    # Keeps raising exc_class(msg) until the shared counter runs out.
    retries_to_success['counter'] -= 1
    if retries_to_success['counter'] <= 0:
      return "Succeeded"
    raise exc_class(msg)

  def only_value_errors(exception):
    return isinstance(exception, ValueError)

  wrapped = utils.retry_data_watch_coroutine(
    "node", flaky, retry_on_exception=only_value_errors
  )

  # A filtered exception is retried until success.
  outcome = yield wrapped(ValueError, "Matched", {"counter": 3})
  self.assertEqual(outcome, "Succeeded")

  # An unfiltered exception propagates immediately.
  try:
    yield wrapped(TypeError, "Failed", {"counter": 3})
    self.fail("Exception was expected")
  except TypeError:
    pass
def test_no_errors(self, logger_mock, wait_mock):
  """ A call that succeeds first time must not sleep or log errors. """
  wait_mock.side_effect = testing.gen.coroutine(lambda sec: sec)

  # Wrap a trivial callable that always succeeds.
  persistent_work = utils.retry_data_watch_coroutine(
    "node", lambda: "No Errors"
  )
  outcome = yield persistent_work()

  # Success on the first attempt: no backoff sleeps, no error logs.
  self.assertEqual(outcome, "No Errors")
  self.assertEqual(wait_mock.call_args_list, [])
  self.assertEqual(logger_mock.call_args_list, [])
def _update_version_watch(self, new_version, _):
  """ Handles updates to a version node.

  Args:
    new_version: A JSON string specifying version details.
  """
  # A deleted node means this watch is no longer needed.
  if new_version is None:
    self._stopped = True
    return False

  retrying_update = utils.retry_data_watch_coroutine(
    self.version_node, self.update_version)
  IOLoop.instance().add_callback(retrying_update, new_version)
def test_concurrency_with_failures(self):
  """ Serialization on one node must hold even when calls fail. """
  shared_data = []

  @gen.coroutine
  def record_then_maybe_fail(call_arg):
    for _ in xrange(3):
      yield gen.sleep(0.001)
      shared_data.append(call_arg)
    if call_arg != 4:
      raise ValueError("Why not 4?")

  wrapped = utils.retry_data_watch_coroutine("node", record_then_maybe_fail)
  yield [wrapped(1), wrapped(2), wrapped(3), wrapped(4)]

  # Calls on the same node are handled strictly one after another.
  self.assertEqual(shared_data, [1]*3 + [2]*3 + [3]*3 + [4]*3)
def test_concurrency_without_failures(self):
  """ Calls for the same node must be serialized, not interleaved. """
  shared_data = []

  @gen.coroutine
  def recorder(call_arg):
    for _ in xrange(20):
      # Give tornado a chance to switch to another coroutine.
      yield gen.sleep(0.001)
      shared_data.append(call_arg)

  wrapped = utils.retry_data_watch_coroutine("node", recorder)
  yield [wrapped(1), wrapped(2), wrapped(3), wrapped(4)]

  # We expect that calls will be handled one by one without collisions.
  self.assertEqual(shared_data, [1]*20 + [2]*20 + [3]*20 + [4]*20)
def test_wrapping_coroutine(self, wait_mock):
  """ A coroutine function can be wrapped and retried to success. """
  wait_mock.side_effect = testing.gen.coroutine(lambda sec: False)
  attempt = {'i': 0}

  @gen.coroutine
  def bump_until_big_enough(increment):
    # Succeed only once the accumulated value exceeds 4.
    if attempt['i'] > 4:
      raise gen.Return(attempt)
    attempt['i'] += increment
    raise ValueError("First should be greater than second")

  wrapped = utils.retry_data_watch_coroutine("node", bump_until_big_enough)

  # Retrying eventually pushes the counter past the threshold: 0 -> 2 -> 4 -> 6.
  outcome = yield wrapped(2)
  self.assertEqual(outcome, {'i': 6})
def test_backoff_and_logging(self, gauss_mock, logger_mock, wait_mock):
  """ Backoff durations and retry logs must follow the configured curve. """
  random_value = 0.84
  gauss_mock.return_value = random_value
  wait_mock.side_effect = testing.gen.coroutine(lambda sec: False)

  def always_failing():
    raise ValueError(u"Error \u26a0!")

  persistent_work = utils.retry_data_watch_coroutine(
    "node", always_failing,
    backoff_base=3, backoff_multiplier=0.1, backoff_threshold=2,
    max_retries=4
  )

  # Every attempt fails, so the final error propagates.
  try:
    yield persistent_work()
    self.fail("Exception was expected")
  except ValueError:
    pass

  # Check backoff sleep calls (0.1 * (3 ** attempt) * random_value),
  # capped by the threshold of 2 (times the random factor).
  sleep_args = [args[0] for args, kwargs in wait_mock.call_args_list]
  self.assertAlmostEqual(sleep_args[0], 0.33, 2)
  self.assertAlmostEqual(sleep_args[1], 0.99, 2)
  self.assertAlmostEqual(sleep_args[2], 2.2, 2)
  self.assertAlmostEqual(sleep_args[3], 2.2, 2)

  # Verify logged errors, one per retry.
  expected_logs = [
    "Retry #1 in 0.3s",
    "Retry #2 in 1.0s",
    "Retry #3 in 2.2s",
    "Retry #4 in 2.2s",
  ]
  self.assertEqual(len(expected_logs), len(logger_mock.call_args_list))
  for error_message, call_args_kwargs in zip(
      expected_logs, logger_mock.call_args_list):
    self.assertEqual(call_args_kwargs[0][0], error_message)