def _coroutine(self):
    # type: (...) -> TunerCoroutine
    """Generator coroutine that proposes hyperparameters to evaluate.

    Protocol: each `yield hparams` hands a dict of hyperparameter values to
    the driver, which replies via `send()` with a "point" (presumably the
    measured result for those hparams -- TODO confirm against the caller).
    Evaluated (hparams, point) pairs are accumulated and fed to
    learn_lin_reg.learn_linear_regularizer to derive the next proposal.
    """

    def key_for_hparams(hparams):
      # Dicts are unhashable; a sorted tuple of items is a canonical,
      # hashable key for an hparams dict.
      return tuple(sorted(six.iteritems(hparams)))

    # Maps key_for_hparams(hparams) -> point received for those hparams.
    hparams_to_point = {}
    # Seed the model by evaluating the user-supplied initial hparams first.
    for hparams in self.initial_hparams:
      point = yield hparams
      key = key_for_hparams(hparams)
      hparams_to_point[key] = point

    def dist(hparams0, hparams1):
      # Euclidean distance between two hparam dicts; both must cover the
      # same hyperparameter names.
      assert set(six.iterkeys(hparams0)) == set(six.iterkeys(hparams1))
      squared_dist = sum((hparams0[k] - hparams1[k])**2 for k in hparams0)
      return squared_dist**0.5

    while True:
      points = list(six.itervalues(hparams_to_point))
      _, coeffs = learn_lin_reg.learn_linear_regularizer(points)
      # Interpret the learned coefficients as the next hparam values,
      # paired positionally with self.hparam_names.
      hparams = dict(zip(self.hparam_names, coeffs))

      # If the hparams we get from solving the LP are very close to ones
      # we've already tried, revert to random sampling instead.
      # (Keys are item-tuples, so dict(prev_hparams) reconstructs the dict.)
      min_dist = min(dist(hparams, dict(prev_hparams))
                     for prev_hparams in hparams_to_point)
      if min_dist < self.eps:
        hparams = self.sample_hparams()

      point = yield hparams
      key = key_for_hparams(hparams)
      hparams_to_point[key] = point
 def test_learn_linear_regularizer(self, data_points,
                                   expected_normalized_coefficients):
     """Checks that the learned coefficients, scaled by alpha, match the expectation."""
     alpha, raw_coefficients = learn_lin_reg.learn_linear_regularizer(
         data_points)
     # Divide out alpha so the comparison is invariant to overall scale.
     np.testing.assert_almost_equal(expected_normalized_coefficients,
                                    raw_coefficients / alpha)