  def testLossfunChecksShape(self):
    """Tests that the loss function checks the shape of its input."""
    x1 = np.ones((10, 24), np.float32)
    x2 = np.ones((10, 16), np.float32)
    lossfun = adaptive.AdaptiveLossFunction(x1.shape[1], np.float32)
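    # The loss was constructed for x1's dimensionality (24), so evaluating it
    # on 16-dimensional input should fail the shape check.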
    with self.assertRaises(tf.errors.InvalidArgumentError):
      lossfun(x2)

  def testFixedAlphaAndScaleAreCorrect(self, float_dtype):
    """Tests that fixed alphas and scales do not change during optimization)."""
    for _ in range(8):
      alpha_lo = float_dtype(np.random.uniform() * 2.)
      alpha_hi = alpha_lo
      scale_init = float_dtype(np.random.uniform() + 0.5)
      scale_lo = scale_init
      samples = float_dtype(np.random.uniform(size=(10, 10)))

      # We must construct some variable for TF to attempt to optimize.
      mu = tf.Variable(
          tf.zeros(tf.shape(samples)[1], float_dtype), name='ToyMu')
      adaptive_lossfun = adaptive.AdaptiveLossFunction(
          mu.shape[0],
          float_dtype,
          alpha_lo=alpha_lo,
          alpha_hi=alpha_hi,
          scale_lo=scale_lo,
          scale_init=scale_init)
      trainable_variables = list(adaptive_lossfun.trainable_variables) + [mu]

      optimizer = tf.keras.optimizers.SGD(learning_rate=1000)
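      # Take a single step with a very large learning rate: if alpha or scale
      # were trainable here, this step would move them far from their fixed
      # values and the assertions below would fail.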
      # pylint: disable=cell-var-from-loop
      optimizer.minimize(
          lambda: tf.reduce_mean(adaptive_lossfun(samples - mu[tf.newaxis, :])),
          trainable_variables)

      alpha = adaptive_lossfun.alpha()[0, :].numpy()
      scale = adaptive_lossfun.scale()[0, :].numpy()

      alpha_init = (alpha_lo + alpha_hi) / 2.
      self.assertAllClose(alpha, alpha_init * np.ones_like(alpha))
      self.assertAllClose(scale, scale_init * np.ones_like(alpha))

  def testInitialAlphaAndScaleAreCorrect(self, float_dtype):
    """Tests that `alpha` and `scale` are initialized as expected."""
    for i in range(8):
      # Generate random ranges for alpha and scale.
      alpha_lo = float_dtype(np.random.uniform())
      alpha_hi = float_dtype(np.random.uniform() + 1.)
      # Half of the time pick a random initialization for alpha, the other half
      # use the default value.
      if i % 2 == 0:
        alpha_init = float_dtype(alpha_lo + np.random.uniform() *
                                 (alpha_hi - alpha_lo))
        true_alpha_init = alpha_init
      else:
        alpha_init = None
        true_alpha_init = (alpha_lo + alpha_hi) / 2.
      scale_init = float_dtype(np.random.uniform() + 0.5)
      scale_lo = float_dtype(np.random.uniform() * 0.1)
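      # scale_lo is drawn well below scale_init, so the requested initial
      # scale always lies inside the allowed range.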
      adaptive_lossfun = adaptive.AdaptiveLossFunction(
          10,
          float_dtype,
          alpha_lo=alpha_lo,
          alpha_hi=alpha_hi,
          alpha_init=alpha_init,
          scale_lo=scale_lo,
          scale_init=scale_init)
      alpha = adaptive_lossfun.alpha()[0, :].numpy()
      scale = adaptive_lossfun.scale()[0, :].numpy()
      self.assertAllClose(alpha, true_alpha_init * np.ones_like(alpha))
      self.assertAllClose(scale, scale_init * np.ones_like(alpha))

  def testLossfunPreservesDtype(self, float_dtype):
    """Checks that the loss's outputs have the same precision as its inputs."""
    num_dims = 8
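    # `sample_nd_mixed_data` (defined elsewhere in this file) is assumed to
    # return (100, num_dims) samples along with the true per-dimension mean,
    # shape, and scale parameters; only the samples are needed here.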
    samples, _, _, _ = sample_nd_mixed_data(100, num_dims, float_dtype)
    lossfun = adaptive.AdaptiveLossFunction(num_dims, float_dtype)
    loss = lossfun(samples)
    self.assertDTypeEqual(loss, float_dtype)
    self.assertDTypeEqual(lossfun.alpha(), float_dtype)
    self.assertDTypeEqual(lossfun.scale(), float_dtype)

  def testFittingToyNdMixedDataIsCorrect(self, float_dtype):
    """Tests that minimizing the adaptive loss recovers the true model.

    Here we generate a 2D array of samples drawn from a mix of scaled and
    shifted Cauchy and Normal distributions. We then minimize our loss with
    respect to the mean, scale, and shape of each distribution, and check that
    after minimization the shape parameter is near-zero for the Cauchy data and
    near 2 for the Normal data, and that the estimated means and scales are
    accurate.

    Args:
      float_dtype: The type (np.float32 or np.float64) of data to test.
    """
    samples, mu_true, alpha_true, scale_true = sample_nd_mixed_data(
        100, 8, float_dtype)
    mu = tf.Variable(tf.zeros(tf.shape(samples)[1], float_dtype), name='ToyMu')
    adaptive_lossfun = adaptive.AdaptiveLossFunction(mu.shape[0], float_dtype)
    trainable_variables = list(adaptive_lossfun.trainable_variables) + [mu]

    init_rate = 1.
    final_rate = 0.1
    num_iters = 201
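    # Decay the learning rate geometrically from init_rate down to roughly
    # final_rate over the course of num_iters steps.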
    learning_rate = tf.keras.optimizers.schedules.ExponentialDecay(
        init_rate, 1, (final_rate / init_rate)**(1. / num_iters))
    optimizer = tf.keras.optimizers.Adam(
        learning_rate=learning_rate, beta_1=0.5, beta_2=0.9, epsilon=1e-08)

    for _ in range(num_iters):
      optimizer.minimize(
          lambda: tf.reduce_mean(adaptive_lossfun(samples - mu[tf.newaxis, :])),
          trainable_variables)

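    # Read the fitted per-dimension parameters back out as numpy arrays.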
    mu = mu.numpy()
    alpha = adaptive_lossfun.alpha()[0, :].numpy()
    scale = adaptive_lossfun.scale()[0, :].numpy()

    for a, b in [(alpha, alpha_true), (scale, scale_true), (mu, mu_true)]:
      self.assertAllClose(a, b * np.ones_like(a), rtol=0.1, atol=0.1)