def _update_worker(self, queue_config, _):
  """ Relays queue configuration updates to the main IO loop.

  This callback fires on a ZooKeeper watcher thread, so it must not touch
  any state directly; it only schedules the real handler on the IO loop.

  Args:
    queue_config: A JSON string specifying queue configuration.
  """
  if queue_config is None:
    # The queues node is gone; stop watching unless the project remains.
    project_node = '/appscale/projects/{}'.format(self.project_id)
    try:
      project_exists = self.zk_client.exists(project_node) is not None
    except ZookeeperError:
      # If the project has been deleted, an extra "exists" watch will
      # remain.
      project_exists = True

    if not project_exists:
      self._stopped = True
      return False

  persistent_update_worker = retry_data_watch_coroutine(
    self.queues_node, self.update_worker)
  io_loop = IOLoop.instance()
  io_loop.add_callback(persistent_update_worker, queue_config)
def test_concurrency_between_different_nodes(self):
  shared_data = []

  @gen.coroutine
  def func(call_arg):
    for _ in xrange(20):
      yield gen.sleep(0.001)
      shared_data.append(call_arg)

  # Each call gets its own node, so the wrappers are independent.
  wrappers = [
    async_retrying.retry_data_watch_coroutine("node-{}".format(num), func)
    for num in (1, 2, 3, 4)
  ]
  yield [wrapper(num) for num, wrapper in enumerate(wrappers, start=1)]

  # Calls on different nodes interleave, so strict grouping is unlikely.
  self.assertNotEqual(shared_data, [1] * 20 + [2] * 20 + [3] * 20 + [4] * 20)
def test_exception_filter(self, wait_mock):
  wait_mock.side_effect = testing.gen.coroutine(lambda sec: False)

  def func(exc_class, message, countdown):
    countdown['counter'] -= 1
    if countdown['counter'] <= 0:
      return "Succeeded"
    raise exc_class(message)

  def err_filter(exception):
    return isinstance(exception, ValueError)

  wrapped = async_retrying.retry_data_watch_coroutine(
    "node", func, retry_on_exception=err_filter)

  # A filtered exception type is retried until the call succeeds.
  result = yield wrapped(ValueError, "Matched", {"counter": 3})
  self.assertEqual(result, "Succeeded")

  # An unfiltered exception type propagates instead of being retried.
  raised = False
  try:
    yield wrapped(TypeError, "Failed", {"counter": 3})
  except TypeError:
    raised = True
  if not raised:
    self.fail("Exception was expected")
def test_exception_filter(self, wait_mock):
  wait_mock.side_effect = testing.gen.coroutine(lambda sec: False)

  def flaky(exc_class, msg, state):
    state['counter'] -= 1
    if state['counter'] > 0:
      raise exc_class(msg)
    return "Succeeded"

  wrapped = async_retrying.retry_data_watch_coroutine(
    "node", flaky,
    retry_on_exception=lambda err: isinstance(err, ValueError))

  # With a matching exception type the call is retried until it succeeds.
  outcome = yield wrapped(ValueError, "Matched", {"counter": 3})
  self.assertEqual(outcome, "Succeeded")

  # A non-matching exception type is raised to the caller.
  try:
    yield wrapped(TypeError, "Failed", {"counter": 3})
  except TypeError:
    pass
  else:
    self.fail("Exception was expected")
def _update_worker(self, queue_config, _):
  """ Forwards a queue configuration change to the main IO loop.

  Runs on a ZooKeeper watcher thread, so no state is modified here; the
  update is merely scheduled on the main IO loop.

  Args:
    queue_config: A JSON string specifying queue configuration.
  """
  if queue_config is None:
    # Prevent further watches if they are no longer needed.
    try:
      node = '/appscale/projects/{}'.format(self.project_id)
      project_exists = self.zk_client.exists(node) is not None
    except ZookeeperError:
      # A deleted project leaves an extra "exists" watch behind.
      project_exists = True

    if not project_exists:
      self._stopped = True
      return False

  handler = retry_data_watch_coroutine(self.queues_node, self.update_worker)
  IOLoop.instance().add_callback(handler, queue_config)
def test_concurrency_between_different_nodes(self):
  collected = []

  @gen.coroutine
  def worker(call_arg):
    for _ in xrange(20):
      yield gen.sleep(0.001)
      collected.append(call_arg)

  first = async_retrying.retry_data_watch_coroutine("node-1", worker)
  second = async_retrying.retry_data_watch_coroutine("node-2", worker)
  third = async_retrying.retry_data_watch_coroutine("node-3", worker)
  fourth = async_retrying.retry_data_watch_coroutine("node-4", worker)

  yield [first(1), second(2), third(3), fourth(4)]

  # Different nodes do not serialize against each other, so the calls
  # should interleave rather than complete one-by-one.
  self.assertNotEqual(collected, [1] * 20 + [2] * 20 + [3] * 20 + [4] * 20)
def zk_state_listener(state):
  """ Handles changes to ZooKeeper connection state.

  Args:
    state: A string specifying the new ZooKeeper connection state.
  """
  if state != KazooState.CONNECTED:
    return

  # Re-register the server node whenever the session (re)connects.
  persistent_create_server_node = retry_data_watch_coroutine(
    server_node, create_server_node)
  IOLoop.instance().add_callback(persistent_create_server_node)
def zk_state_listener(state):
  """ Reacts to ZooKeeper connection state changes.

  Args:
    state: A string specifying the new ZooKeeper connection state.
  """
  if state == KazooState.CONNECTED:
    # Re-create the server registration on every reconnect.
    create_persistently = retry_data_watch_coroutine(
      server_node, create_server_node)
    IOLoop.instance().add_callback(create_persistently)
def _controller_state_watch(self, encoded_controller_state, _):
  """ Handles updates to controller state.

  Args:
    encoded_controller_state: A JSON-encoded string containing controller
      state.
  """
  # Hop from the watcher thread onto the main IO loop before acting.
  handler = retry_data_watch_coroutine(
    CONTROLLER_STATE_NODE, self._update_controller_state)
  IOLoop.instance().add_callback(handler, encoded_controller_state)
def _controller_state_watch(self, encoded_controller_state, _):
  """ Schedules processing of a controller state update on the IO loop.

  Args:
    encoded_controller_state: A JSON-encoded string containing controller
      state.
  """
  persistent_update = retry_data_watch_coroutine(
    CONTROLLER_STATE_NODE, self._update_controller_state)
  main_io_loop = IOLoop.instance()
  main_io_loop.add_callback(persistent_update, encoded_controller_state)
def _update_services_watch(self, encoded_assignments, _):
  """ Schedules an update of service assignments on the main IO loop.

  Args:
    encoded_assignments: A JSON-encoded string specifying service
      assignments.
  """
  # An empty or missing node means there are no assignments.
  assignments = json.loads(encoded_assignments) if encoded_assignments else {}
  persistent_update_services = retry_data_watch_coroutine(
    self._assignments_path, self._update_services)
  IOLoop.instance().add_callback(persistent_update_services, assignments)
def _update_services_watch(self, encoded_assignments, _):
  """ Updates service schedules to fulfill assignments.

  Args:
    encoded_assignments: A JSON-encoded string specifying service
      assignments.
  """
  scheduled_update = retry_data_watch_coroutine(
    self._assignments_path, self._update_services)
  # Treat an empty or deleted node as "no assignments".
  if encoded_assignments:
    assignments = json.loads(encoded_assignments)
  else:
    assignments = {}
  IOLoop.instance().add_callback(scheduled_update, assignments)
def test_no_errors(self, warning_mock, error_mock, wait_mock):
  wait_mock.side_effect = testing.gen.coroutine(lambda sec: sec)

  # Call a dummy lambda persistently.
  persistent_work = async_retrying.retry_data_watch_coroutine(
    "node", lambda: "No Errors")
  result = yield persistent_work()

  # A clean run returns the value and neither waits nor logs anything.
  self.assertEqual(result, "No Errors")
  for mock in (wait_mock, warning_mock, error_mock):
    self.assertEqual(mock.call_args_list, [])
def test_no_errors(self, warning_mock, error_mock, wait_mock):
  wait_mock.side_effect = testing.gen.coroutine(lambda sec: sec)

  # Wrap a trivial callable and call it persistently.
  work = async_retrying.retry_data_watch_coroutine("node", lambda: "No Errors")
  outcome = yield work()

  # No retries means no waits and nothing logged.
  self.assertEqual(outcome, "No Errors")
  self.assertEqual(wait_mock.call_args_list, [])
  self.assertEqual(warning_mock.call_args_list, [])
  self.assertEqual(error_mock.call_args_list, [])
def _update_version_watch(self, new_version, _):
  """ Handles updates to a version node.

  Args:
    new_version: A JSON string specifying version details.
  """
  if new_version is None:
    # The version node was deleted; stop watching it.
    self._stopped = True
    return False

  persistent_update_version = retry_data_watch_coroutine(
    self.version_node, self.update_version)
  IOLoop.instance().add_callback(persistent_update_version, new_version)
def _update_version_watch(self, new_version, _):
  """ Relays version node updates to the main IO loop.

  Args:
    new_version: A JSON string specifying version details.
  """
  if new_version is None:
    self._stopped = True  # A removed node means this watch is done.
    return False

  handler = retry_data_watch_coroutine(self.version_node, self.update_version)
  main_io_loop = IOLoop.instance()
  main_io_loop.add_callback(handler, new_version)
def test_concurrency_with_failures(self):
  shared_data = []

  @gen.coroutine
  def func(call_arg):
    for _ in xrange(3):
      yield gen.sleep(0.001)
      shared_data.append(call_arg)
    if call_arg != 4:
      raise ValueError("Why not 4?")

  wrapped = async_retrying.retry_data_watch_coroutine("node", func)
  yield [wrapped(n) for n in (1, 2, 3, 4)]

  # Failures must not break the one-by-one ordering per node.
  self.assertEqual(shared_data, [1] * 3 + [2] * 3 + [3] * 3 + [4] * 3)
def test_concurrency_without_failures(self):
  shared_data = []

  @gen.coroutine
  def func(call_arg):
    for _ in xrange(20):
      # Give tornado a chance to switch to another coroutine.
      yield gen.sleep(0.001)
      shared_data.append(call_arg)

  wrapped = async_retrying.retry_data_watch_coroutine("node", func)
  yield [wrapped(n) for n in (1, 2, 3, 4)]

  # Calls on the same node are serialized, so values never interleave.
  self.assertEqual(shared_data, [1] * 20 + [2] * 20 + [3] * 20 + [4] * 20)
def test_concurrency_with_failures(self):
  observed = []

  @gen.coroutine
  def worker(call_arg):
    for _ in xrange(3):
      yield gen.sleep(0.001)
      observed.append(call_arg)
    if call_arg != 4:
      raise ValueError("Why not 4?")

  wrapped = async_retrying.retry_data_watch_coroutine("node", worker)
  yield [wrapped(1), wrapped(2), wrapped(3), wrapped(4)]

  # Even failing calls complete their appends in order on the same node.
  expected = [1] * 3 + [2] * 3 + [3] * 3 + [4] * 3
  self.assertEqual(observed, expected)
def test_backoff_and_logging(self, random_mock, warning_mock, error_mock,
                             wait_mock, current_io_loop_mock):
  random_value = 0.84
  random_mock.return_value = random_value
  wait_mock.side_effect = testing.gen.coroutine(lambda sec: False)
  current_io_loop_mock.return_value = MagicMock(time=lambda: 100.0)

  def do_work():
    raise ValueError(u"Error \u26a0!")

  persistent_work = async_retrying.retry_data_watch_coroutine(
    "node", do_work, backoff_base=3, backoff_multiplier=0.1,
    backoff_threshold=2, max_retries=4)

  try:
    yield persistent_work()
  except ValueError:
    pass
  else:
    self.fail("Exception was expected")

  # Deadlines follow 100.0 + 0.1 * (3 ** attempt) * random, capped at the
  # backoff threshold of 2 seconds.
  deadlines = [args[0] for args, kwargs in wait_mock.call_args_list]
  for index, expected in enumerate((100.33, 100.99, 102.2, 102.2)):
    self.assertAlmostEqual(deadlines[index], expected, 2)

  # Each retry logs a traceback ending with the retry summary.
  expected_warnings = [
    "Retry #1 in 0.3s",
    "Retry #2 in 1.0s",
    "Retry #3 in 2.2s",
    "Retry #4 in 2.2s",
  ]
  self.assertEqual(len(expected_warnings), len(warning_mock.call_args_list))
  for logged_call, suffix in zip(warning_mock.call_args_list,
                                 expected_warnings):
    message = logged_call[0][0]
    self.assertTrue(message.startswith("Traceback"))
    self.assertTrue(message.endswith(suffix))

  # Exactly one final error once retries are exhausted.
  self.assertEqual(
    error_mock.call_args_list,
    [call("Giving up retrying after 5 attempts during 0.0s")])
def test_concurrency_without_failures(self):
  observed = []

  @gen.coroutine
  def worker(call_arg):
    for _ in xrange(20):
      # Let tornado switch to another coroutine.
      yield gen.sleep(0.001)
      observed.append(call_arg)

  wrapped = async_retrying.retry_data_watch_coroutine("node", worker)
  yield [wrapped(1), wrapped(2), wrapped(3), wrapped(4)]

  # We expect that calls will be handled one by one without collisions.
  expected = [1] * 20 + [2] * 20 + [3] * 20 + [4] * 20
  self.assertEqual(observed, expected)
def test_wrapping_coroutine(self, wait_mock):
  wait_mock.side_effect = testing.gen.coroutine(lambda sec: False)
  attempt = {'i': 0}

  @gen.coroutine
  def func(increment):
    if attempt['i'] > 4:
      raise gen.Return(attempt)
    attempt['i'] += increment
    raise ValueError("First should be greater than second")

  wrapped = async_retrying.retry_data_watch_coroutine("node", func)

  # Retrying drives the counter to 6 (0 -> 2 -> 4 -> 6) before success.
  result = yield wrapped(2)
  self.assertEqual(result, {'i': 6})
def test_wrapping_coroutine(self, wait_mock):
  wait_mock.side_effect = testing.gen.coroutine(lambda sec: False)
  state = {'i': 0}

  @gen.coroutine
  def bump(increment):
    if state['i'] <= 4:
      state['i'] += increment
      raise ValueError("First should be greater than second")
    raise gen.Return(state)

  wrapped = async_retrying.retry_data_watch_coroutine("node", bump)

  # The wrapper retries the coroutine until it succeeds.
  outcome = yield wrapped(2)
  self.assertEqual(outcome, {'i': 6})
def test_backoff_and_logging(self, random_mock, warning_mock, error_mock,
                             wait_mock, current_io_loop_mock):
  # Pin randomness and time so backoff deadlines are predictable.
  random_mock.return_value = 0.84
  wait_mock.side_effect = testing.gen.coroutine(lambda sec: False)
  current_io_loop_mock.return_value = MagicMock(time=lambda: 100.0)

  def always_failing():
    raise ValueError(u"Error \u26a0!")

  wrapped = async_retrying.retry_data_watch_coroutine(
    "node", always_failing, backoff_base=3, backoff_multiplier=0.1,
    backoff_threshold=2, max_retries=4)

  raised = False
  try:
    yield wrapped()
  except ValueError:
    raised = True
  if not raised:
    self.fail("Exception was expected")

  # Check backoff sleep calls (0.1 * (3 ** attempt) * random_value).
  sleep_deadlines = [positional[0]
                     for positional, _ in wait_mock.call_args_list]
  self.assertAlmostEqual(sleep_deadlines[0], 100.33, 2)
  self.assertAlmostEqual(sleep_deadlines[1], 100.99, 2)
  self.assertAlmostEqual(sleep_deadlines[2], 102.2, 2)
  self.assertAlmostEqual(sleep_deadlines[3], 102.2, 2)

  # Every retry should log a traceback ending with its retry summary.
  retry_summaries = ["Retry #1 in 0.3s", "Retry #2 in 1.0s",
                     "Retry #3 in 2.2s", "Retry #4 in 2.2s"]
  self.assertEqual(len(warning_mock.call_args_list), len(retry_summaries))
  for position, call_args_kwargs in enumerate(warning_mock.call_args_list):
    logged = call_args_kwargs[0][0]
    self.assertTrue(logged.startswith("Traceback"))
    self.assertTrue(logged.endswith(retry_summaries[position]))

  # A single error is logged when retries are exhausted.
  self.assertEqual(
    error_mock.call_args_list,
    [call("Giving up retrying after 5 attempts during 0.0s")])