Пример #1
0
  def _update_worker(self, queue_config, _):
    """ Handles updates to a queue configuration node.

    Runs in a separate (ZooKeeper watcher) thread, so no state is changed
    here directly; the real handler is scheduled on the main IO loop.

    Args:
      queue_config: A JSON string specifying queue configuration, or None if
        the node was deleted.
      _: Unused ZooKeeper node stats.
    Returns:
      False to cancel the watch when it is no longer needed, None otherwise.
    """
    if queue_config is None:
      # A deleted config node may mean the whole project is gone.
      project_node = '/appscale/projects/{}'.format(self.project_id)
      try:
        project_gone = self.zk_client.exists(project_node) is None
      except ZookeeperError:
        # If the project has been deleted, an extra "exists" watch will
        # remain, so assume it still exists on error.
        project_gone = False

      if project_gone:
        self._stopped = True
        return False

    persistent_update_worker = retry_data_watch_coroutine(
      self.queues_node, self.update_worker)
    IOLoop.instance().add_callback(persistent_update_worker, queue_config)
Пример #2
0
  def test_concurrency_between_different_nodes(self):
    # Records the order in which coroutine iterations actually ran.
    shared_data = []

    @gen.coroutine
    def func(call_arg):
      for _ in xrange(20):
        yield gen.sleep(0.001)
        shared_data.append(call_arg)

    # Each wrapper targets a different node, so their calls are not
    # serialized against each other.
    wrapped_by_node = [
      async_retrying.retry_data_watch_coroutine(node, func)
      for node in ("node-1", "node-2", "node-3", "node-4")
    ]

    yield [wrapped(arg) for wrapped, arg in zip(wrapped_by_node, (1, 2, 3, 4))]
    # Independent nodes interleave, so the strictly grouped order must not
    # occur.
    self.assertNotEqual(shared_data, [1]*20 + [2]*20 + [3]*20 + [4]*20)
Пример #3
0
  def test_exception_filter(self, wait_mock):
    # Make the retry helper's wait resolve immediately.
    wait_mock.side_effect = testing.gen.coroutine(lambda sec: False)

    def func(exc_class, msg, retries_to_success):
      # Fails with exc_class until the shared counter runs out.
      retries_to_success['counter'] -= 1
      if retries_to_success['counter'] > 0:
        raise exc_class(msg)
      return "Succeeded"

    wrapped = async_retrying.retry_data_watch_coroutine(
      "node", func,
      retry_on_exception=lambda error: isinstance(error, ValueError)
    )

    # A matching exception is retried until func succeeds.
    result = yield wrapped(ValueError, "Matched", {"counter": 3})
    self.assertEqual(result, "Succeeded")

    # A filtered-out exception propagates without retries.
    try:
      yield wrapped(TypeError, "Failed", {"counter": 3})
    except TypeError:
      pass
    else:
      self.fail("Exception was expected")
Пример #4
0
    def test_exception_filter(self, wait_mock):
        # Make the retry helper's wait resolve immediately.
        wait_mock.side_effect = testing.gen.coroutine(lambda sec: False)

        def func(exc_class, msg, retries_to_success):
            # Fails with exc_class until the shared counter runs out.
            retries_to_success['counter'] -= 1
            if retries_to_success['counter'] <= 0:
                return "Succeeded"
            raise exc_class(msg)

        def err_filter(exception):
            # Only ValueError is considered retriable.
            return isinstance(exception, ValueError)

        wrapped = async_retrying.retry_data_watch_coroutine(
            "node", func, retry_on_exception=err_filter)

        # Test retry helps.
        result = yield wrapped(ValueError, "Matched", {"counter": 3})
        self.assertEqual(result, "Succeeded")

        # Test retry not applicable.
        try:
            yield wrapped(TypeError, "Failed", {"counter": 3})
            self.fail("Exception was expected")
        except TypeError:
            # TypeError is filtered out, so it propagates without retries.
            pass
Пример #5
0
    def _update_worker(self, queue_config, _):
        """ Handles updates to a queue configuration node.

        Since this runs in a separate thread, it doesn't change any state
        directly. Instead, it just acts as a bridge back to the main IO loop.

        Args:
          queue_config: A JSON string specifying queue configuration, or None
            if the node was deleted.
          _: Unused ZooKeeper node stats.
        Returns:
          False to cancel the watch when it is no longer needed, None
          otherwise.
        """
        # Prevent further watches if they are no longer needed.
        if queue_config is None:
            try:
                project_exists = self.zk_client.exists(
                    '/appscale/projects/{}'.format(
                        self.project_id)) is not None
            except ZookeeperError:
                # If the project has been deleted, an extra "exists" watch will remain.
                project_exists = True

            if not project_exists:
                self._stopped = True
                return False

        # Retry the handler on failure; schedule it on the main IO loop since
        # this callback fires in the watcher thread.
        persistent_update_worker = retry_data_watch_coroutine(
            self.queues_node, self.update_worker)
        main_io_loop = IOLoop.instance()
        main_io_loop.add_callback(persistent_update_worker, queue_config)
Пример #6
0
    def test_concurrency_between_different_nodes(self):
        # Records the order in which coroutine iterations actually ran.
        shared_data = []

        @gen.coroutine
        def func(call_arg):
            for _ in xrange(20):
                # Give tornado a chance to switch to another coroutine.
                yield gen.sleep(0.001)
                shared_data.append(call_arg)

        # Each wrapper targets a different node, so their calls are not
        # serialized against each other.
        wrapped_1 = async_retrying.retry_data_watch_coroutine("node-1", func)
        wrapped_2 = async_retrying.retry_data_watch_coroutine("node-2", func)
        wrapped_3 = async_retrying.retry_data_watch_coroutine("node-3", func)
        wrapped_4 = async_retrying.retry_data_watch_coroutine("node-4", func)

        yield [wrapped_1(1), wrapped_2(2), wrapped_3(3), wrapped_4(4)]
        # Independent nodes interleave, so the strictly grouped ordering
        # must not occur.
        self.assertNotEqual(shared_data,
                            [1] * 20 + [2] * 20 + [3] * 20 + [4] * 20)
Пример #7
0
def zk_state_listener(state):
  """ Handles changes to ZooKeeper connection state.

  Args:
    state: A string specifying the new ZooKeeper connection state.
  """
  if state != KazooState.CONNECTED:
    return

  # Re-create the server registration node whenever the connection is
  # (re)established; schedule it on the main IO loop with retries.
  persistent_create_server_node = retry_data_watch_coroutine(
    server_node, create_server_node)
  IOLoop.instance().add_callback(persistent_create_server_node)
Пример #8
0
    def zk_state_listener(state):
        """ Handles changes to ZooKeeper connection state.

        Args:
          state: A string specifying the new ZooKeeper connection state.
        """
        if state == KazooState.CONNECTED:
            # Re-create the server registration node after (re)connecting,
            # retrying on failure.
            persistent_create_server_node = retry_data_watch_coroutine(
                server_node, create_server_node)
            # Schedule on the main IO loop; state listeners fire in another
            # thread.
            IOLoop.instance().add_callback(persistent_create_server_node)
Пример #9
0
  def _controller_state_watch(self, encoded_controller_state, _):
    """ Handles updates to controller state.

    Args:
      encoded_controller_state: A JSON-encoded string containing controller
        state.
      _: Unused ZooKeeper node stats.
    """
    # Bridge the watcher-thread callback onto the main IO loop, retrying the
    # handler on failure.
    callback = retry_data_watch_coroutine(
      CONTROLLER_STATE_NODE, self._update_controller_state)
    IOLoop.instance().add_callback(callback, encoded_controller_state)
Пример #10
0
    def _controller_state_watch(self, encoded_controller_state, _):
        """ Handles updates to controller state.

        Args:
          encoded_controller_state: A JSON-encoded string containing
            controller state.
          _: Unused ZooKeeper node stats.
        """
        # Retry the handler on failure, with calls for this node serialized.
        persistent_update_controller_state = retry_data_watch_coroutine(
            CONTROLLER_STATE_NODE, self._update_controller_state)
        # Hand off to the main IO loop; the watch fires in another thread.
        IOLoop.instance().add_callback(persistent_update_controller_state,
                                       encoded_controller_state)
Пример #11
0
  def _update_services_watch(self, encoded_assignments, _):
    """ Updates service schedules to fulfill assignments.

    Args:
      encoded_assignments: A JSON-encoded string specifying service
        assignments.
      _: Unused ZooKeeper node stats.
    """
    # An empty or missing payload means there are no assignments.
    if encoded_assignments:
      assignments = json.loads(encoded_assignments)
    else:
      assignments = {}

    persistent_update_services = retry_data_watch_coroutine(
      self._assignments_path, self._update_services)
    IOLoop.instance().add_callback(persistent_update_services, assignments)
Пример #12
0
  def _update_services_watch(self, encoded_assignments, _):
    """ Updates service schedules to fulfill assignments.

    Args:
      encoded_assignments: A JSON-encoded string specifying service
        assignments.
      _: Unused ZooKeeper node stats.
    """
    # Retry the handler on failure, serializing calls for this node.
    persistent_update_services = retry_data_watch_coroutine(
      self._assignments_path, self._update_services)
    # An empty or missing payload means there are no assignments.
    assignments = json.loads(encoded_assignments) if encoded_assignments else {}

    IOLoop.instance().add_callback(persistent_update_services, assignments)
Пример #13
0
    def test_no_errors(self, warning_mock, error_mock, wait_mock):
        # Make the retry helper's wait resolve immediately.
        wait_mock.side_effect = testing.gen.coroutine(lambda sec: sec)

        # Call dummy lambda persistently.
        persistent_work = async_retrying.retry_data_watch_coroutine(
            "node", lambda: "No Errors")
        result = yield persistent_work()

        # Assert outcomes.
        self.assertEqual(result, "No Errors")
        # A successful first attempt means no waits and no log output.
        self.assertEqual(wait_mock.call_args_list, [])
        self.assertEqual(warning_mock.call_args_list, [])
        self.assertEqual(error_mock.call_args_list, [])
Пример #14
0
  def test_no_errors(self, warning_mock, error_mock, wait_mock):
    # Make the retry helper's wait resolve immediately.
    wait_mock.side_effect = testing.gen.coroutine(lambda sec: sec)

    # A function that succeeds on the first try needs no retries.
    persistent_work = async_retrying.retry_data_watch_coroutine(
      "node", lambda: "No Errors")

    result = yield persistent_work()

    self.assertEqual(result, "No Errors")
    # No retries means no waiting and nothing logged.
    for mock in (wait_mock, warning_mock, error_mock):
      self.assertEqual(mock.call_args_list, [])
Пример #15
0
    def _update_version_watch(self, new_version, _):
        """ Handles updates to a version node.

        Args:
          new_version: A JSON string specifying version details, or None if
            the node was deleted.
          _: Unused ZooKeeper node stats.
        Returns:
          False to cancel the watch once the version is gone, None otherwise.
        """
        if new_version is None:
            # The version node was deleted; stop watching for updates.
            self._stopped = True
            return False

        # Retry the handler on failure; schedule it on the main IO loop since
        # the watch fires in another thread.
        persistent_update_version = retry_data_watch_coroutine(
            self.version_node, self.update_version)
        main_io_loop = IOLoop.instance()
        main_io_loop.add_callback(persistent_update_version, new_version)
Пример #16
0
  def _update_version_watch(self, new_version, _):
    """ Handles updates to a version node.

    Args:
      new_version: A JSON string specifying version details, or None if the
        node was deleted.
      _: Unused ZooKeeper node stats.
    Returns:
      False to cancel the watch once the version is gone, None otherwise.
    """
    if new_version is None:
      # The version node was deleted; stop watching it.
      self._stopped = True
      return False

    # Retry the handler on failure and hand off to the main IO loop.
    persistent_update_version = retry_data_watch_coroutine(
      self.version_node, self.update_version)
    IOLoop.instance().add_callback(persistent_update_version, new_version)
Пример #17
0
  def test_concurrency_with_failures(self):
    # Records the order in which coroutine iterations actually ran.
    shared_data = []

    @gen.coroutine
    def func(call_arg):
      for _ in xrange(3):
        yield gen.sleep(0.001)
        shared_data.append(call_arg)
      if call_arg != 4:
        # Fail most calls to verify serialization survives errors.
        raise ValueError("Why not 4?")

    wrapped = async_retrying.retry_data_watch_coroutine("node", func)

    yield [wrapped(call_arg) for call_arg in (1, 2, 3, 4)]
    # Calls for the same node are handled one by one, even when they fail.
    self.assertEqual(shared_data, [1]*3 + [2]*3 + [3]*3 + [4]*3)
Пример #18
0
  def test_concurrency_without_failures(self):
    # Records the order in which coroutine iterations actually ran.
    shared_data = []

    @gen.coroutine
    def func(call_arg):
      for _ in xrange(20):
        # Give tornado a chance to switch to another coroutine.
        yield gen.sleep(0.001)
        shared_data.append(call_arg)

    wrapped = async_retrying.retry_data_watch_coroutine("node", func)

    yield [wrapped(call_arg) for call_arg in (1, 2, 3, 4)]
    # Calls for the same node are serialized, so no interleaving occurs.
    expected = [value for value in (1, 2, 3, 4) for _ in xrange(20)]
    self.assertEqual(shared_data, expected)
Пример #19
0
    def test_concurrency_with_failures(self):
        # Records the order in which coroutine iterations actually ran.
        shared_data = []

        @gen.coroutine
        def func(call_arg):
            for _ in xrange(3):
                # Give tornado a chance to switch to another coroutine.
                yield gen.sleep(0.001)
                shared_data.append(call_arg)
            if call_arg != 4:
                # Fail most calls to verify serialization survives errors.
                raise ValueError("Why not 4?")

        wrapped = async_retrying.retry_data_watch_coroutine("node", func)

        yield [wrapped(1), wrapped(2), wrapped(3), wrapped(4)]
        # Calls for the same node are handled one by one, even when they
        # fail.
        self.assertEqual(shared_data, [1] * 3 + [2] * 3 + [3] * 3 + [4] * 3)
Пример #20
0
    def test_backoff_and_logging(self, random_mock, warning_mock, error_mock,
                                 wait_mock, current_io_loop_mock):
        # Fix the jitter factor and freeze the IO loop clock at 100.0 so the
        # backoff deadlines below are deterministic.
        random_value = 0.84
        random_mock.return_value = random_value
        wait_mock.side_effect = testing.gen.coroutine(lambda sec: False)
        current_io_loop_mock.return_value = MagicMock(time=lambda: 100.0)

        def do_work():
            # Always fails; non-ASCII text also exercises unicode logging.
            raise ValueError(u"Error \u26a0!")

        persistent_work = async_retrying.retry_data_watch_coroutine(
            "node",
            do_work,
            backoff_base=3,
            backoff_multiplier=0.1,
            backoff_threshold=2,
            max_retries=4)
        try:
            yield persistent_work()
            self.fail("Exception was expected")
        except ValueError:
            # Retries exhausted; the original error propagates.
            pass

        # Check backoff sleep calls (0.1 * (3 ** attempt) * random_value).
        # Deadlines are offsets from the frozen clock (100.0), capped by
        # backoff_threshold (2s).
        sleep_args = [args[0] for args, kwargs in wait_mock.call_args_list]
        self.assertAlmostEqual(sleep_args[0], 100.33, 2)
        self.assertAlmostEqual(sleep_args[1], 100.99, 2)
        self.assertAlmostEqual(sleep_args[2], 102.2, 2)
        self.assertAlmostEqual(sleep_args[3], 102.2, 2)

        # Verify logged warnings.
        expected_warnings = [
            "Retry #1 in 0.3s",
            "Retry #2 in 1.0s",
            "Retry #3 in 2.2s",
            "Retry #4 in 2.2s",
        ]
        self.assertEqual(len(expected_warnings),
                         len(warning_mock.call_args_list))
        expected_messages = iter(expected_warnings)
        for call_args_kwargs in warning_mock.call_args_list:
            # Each warning holds a traceback ending with the retry notice.
            error_message = expected_messages.next()
            self.assertTrue(call_args_kwargs[0][0].startswith("Traceback"))
            self.assertTrue(call_args_kwargs[0][0].endswith(error_message))
        # Verify errors
        self.assertEqual(
            error_mock.call_args_list,
            [call("Giving up retrying after 5 attempts during 0.0s")])
Пример #21
0
    def test_concurrency_without_failures(self):
        # Records the order in which coroutine iterations actually ran.
        shared_data = []

        @gen.coroutine
        def func(call_arg):
            for _ in xrange(20):
                # Give tornado a chance to switch to another coroutine.
                yield gen.sleep(0.001)
                shared_data.append(call_arg)

        wrapped = async_retrying.retry_data_watch_coroutine("node", func)

        yield [wrapped(1), wrapped(2), wrapped(3), wrapped(4)]
        # We expect that calls will be handled one by one without collisions.
        self.assertEqual(shared_data,
                         [1] * 20 + [2] * 20 + [3] * 20 + [4] * 20)
Пример #22
0
    def test_wrapping_coroutine(self, wait_mock):
        # Make the retry helper's wait resolve immediately.
        wait_mock.side_effect = testing.gen.coroutine(lambda sec: False)

        attempt = {'i': 0}

        @gen.coroutine
        def func(increment):
            # Fails while i <= 4; with increment=2 that is three failures
            # (i: 0 -> 2 -> 4 -> 6) before succeeding.
            if attempt['i'] <= 4:
                attempt['i'] += increment
                raise ValueError("First should be greater than second")
            raise gen.Return(attempt)

        wrapped = async_retrying.retry_data_watch_coroutine("node", func)

        # Test retry helps.
        result = yield wrapped(2)
        self.assertEqual(result, {'i': 6})
Пример #23
0
  def test_wrapping_coroutine(self, wait_mock):
    # Make the retry helper's wait resolve immediately.
    wait_mock.side_effect = testing.gen.coroutine(lambda sec: False)

    attempt = {'i': 0}

    @gen.coroutine
    def func(increment):
      # Succeeds only once i exceeds 4; otherwise increments and fails.
      if attempt['i'] > 4:
        raise gen.Return(attempt)
      attempt['i'] += increment
      raise ValueError("First should be greater than second")

    wrapped = async_retrying.retry_data_watch_coroutine("node", func)

    # Three failing attempts (i: 0 -> 2 -> 4 -> 6), then success.
    result = yield wrapped(2)
    self.assertEqual(result, {'i': 6})
Пример #24
0
  def test_backoff_and_logging(self, random_mock, warning_mock, error_mock,
                               wait_mock, current_io_loop_mock):
    # Fix the jitter factor and freeze the IO loop clock at 100.0 so the
    # backoff deadlines below are deterministic.
    jitter = 0.84
    random_mock.return_value = jitter
    wait_mock.side_effect = testing.gen.coroutine(lambda sec: False)
    current_io_loop_mock.return_value = MagicMock(time=lambda: 100.0)

    def do_work():
      # Always fails; non-ASCII text also exercises unicode logging.
      raise ValueError(u"Error \u26a0!")

    persistent_work = async_retrying.retry_data_watch_coroutine(
      "node", do_work, backoff_base=3, backoff_multiplier=0.1,
      backoff_threshold=2, max_retries=4
    )
    try:
      yield persistent_work()
    except ValueError:
      pass
    else:
      self.fail("Exception was expected")

    # Check backoff sleep calls (0.1 * (3 ** attempt) * random_value);
    # deadlines are offsets from the frozen clock, capped at the threshold.
    deadlines = [args[0] for args, _ in wait_mock.call_args_list]
    self.assertAlmostEqual(deadlines[0], 100.33, 2)
    self.assertAlmostEqual(deadlines[1], 100.99, 2)
    self.assertAlmostEqual(deadlines[2], 102.2, 2)
    self.assertAlmostEqual(deadlines[3], 102.2, 2)

    # Verify logged warnings.
    expected_warnings = [
      "Retry #1 in 0.3s",
      "Retry #2 in 1.0s",
      "Retry #3 in 2.2s",
      "Retry #4 in 2.2s",
    ]
    self.assertEqual(len(expected_warnings), len(warning_mock.call_args_list))
    for warning, call_args_kwargs in zip(expected_warnings,
                                         warning_mock.call_args_list):
      # Each warning holds a traceback ending with the retry notice.
      logged = call_args_kwargs[0][0]
      self.assertTrue(logged.startswith("Traceback"))
      self.assertTrue(logged.endswith(warning))
    # Verify errors
    self.assertEqual(error_mock.call_args_list,
                     [call("Giving up retrying after 5 attempts during 0.0s")])