Example #1
 def testFixedAlphaAndScaleAreCorrect(self, float_dtype):
     """Tests that fixed alphas and scales do not change during optimization)."""
     for i in range(8):
         alpha_lo = float_dtype(np.random.uniform() * 2.)
         alpha_hi = alpha_lo
         scale_init = float_dtype(np.random.uniform() + 0.5)
         scale_lo = scale_init
         samples = float_dtype(np.random.uniform(size=(10, 10)))
         # We must construct some variable for TF to attempt to optimize.
         mu = tf.Variable(tf.zeros(tf.shape(samples)[1], float_dtype),
                          name='DummyMu')
         x = samples - mu[tf.newaxis, :]
         with tf.compat.v1.variable_scope('trial_' + str(i)):
             loss, alpha, scale = adaptive.lossfun(x,
                                                   alpha_lo=alpha_lo,
                                                   alpha_hi=alpha_hi,
                                                   scale_lo=scale_lo,
                                                   scale_init=scale_init)
             with self.session() as sess:
                 # Do one giant gradient descent step (usually a bad idea).
                 optimizer = tf.train.GradientDescentOptimizer(
                     learning_rate=1000.)
                 step = optimizer.minimize(tf.reduce_sum(loss))
                 sess.run(tf.global_variables_initializer())
                 _ = sess.run(step)
                 # Check that `alpha` and `scale` have not changed from their initial
                 # values.
                 alpha, scale = sess.run([alpha, scale])
                 alpha_init = (alpha_lo + alpha_hi) / 2.
                 self.assertAllClose(alpha,
                                     alpha_init * np.ones_like(alpha))
                 self.assertAllClose(scale,
                                     scale_init * np.ones_like(alpha))
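
A minimal usage sketch of the same fixing trick, outside the test harness. It relies only on the `adaptive.lossfun` signature shown above; the import path is an assumption, and the specific values are made up for illustration. Passing alpha_lo == alpha_hi and scale_lo == scale_init leaves the optimizer no freedom to move either parameter, so only external variables (like `mu` above) can change during training.

    import numpy as np
    import tensorflow as tf
    from robust_loss import adaptive  # assumed import path

    # Pin alpha to 1.0 and scale to 0.5 by collapsing their allowed ranges.
    x = tf.constant(np.random.uniform(size=(10, 10)).astype(np.float32))
    loss, alpha, scale = adaptive.lossfun(
        x, alpha_lo=1., alpha_hi=1., scale_lo=0.5, scale_init=0.5)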
Example #2
 def testInitialAlphaAndScaleAreCorrect(self, float_dtype):
     """Tests that `alpha` and `scale` are initialized as expected."""
     for i in range(8):
         # Generate random ranges for alpha and scale.
         alpha_lo = float_dtype(np.random.uniform())
         alpha_hi = float_dtype(np.random.uniform() + 1.)
         # Half of the time pick a random initialization for alpha, the other half
         # use the default value.
         if i % 2 == 0:
             alpha_init = float_dtype(alpha_lo + np.random.uniform() *
                                      (alpha_hi - alpha_lo))
             true_alpha_init = alpha_init
         else:
             alpha_init = None
             true_alpha_init = (alpha_lo + alpha_hi) / 2.
         scale_init = float_dtype(np.random.uniform() + 0.5)
         scale_lo = float_dtype(np.random.uniform() * 0.1)
         with tf.compat.v1.variable_scope('trial_' + str(i)):
             _, alpha, scale = adaptive.lossfun(
                 tf.constant(np.zeros((10, 10), float_dtype)),
                 alpha_lo=alpha_lo,
                 alpha_hi=alpha_hi,
                 alpha_init=alpha_init,
                 scale_lo=scale_lo,
                 scale_init=scale_init)
             with self.session() as sess:
                 sess.run(tf.global_variables_initializer())
                 # Check that `alpha` and `scale` are what we expect them to be.
                 alpha, scale = sess.run([alpha, scale])
                 self.assertAllClose(alpha,
                                     true_alpha_init * np.ones_like(alpha))
                 self.assertAllClose(scale,
                                     scale_init * np.ones_like(alpha))
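
As a small worked example of the default-initialization rule this test checks (the values below are hypothetical): when alpha_init is left as None, alpha starts at the midpoint of [alpha_lo, alpha_hi], while scale always starts at scale_init.

    alpha_lo, alpha_hi = 0.4, 1.6
    default_alpha_init = (alpha_lo + alpha_hi) / 2.  # -> 1.0
    scale_init = 0.75                                # scale is initialized to 0.75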
Example #3
 def testLossfunPreservesDtype(self, float_dtype):
     """Checks the loss's outputs have the same precisions as its input."""
     samples, _, _, _ = self._sample_nd_mixed_data(100, 8, float_dtype)
     loss, alpha, scale = adaptive.lossfun(samples)
     with self.session() as sess:
         sess.run(tf.global_variables_initializer())
         loss, alpha, scale = sess.run([loss, alpha, scale])
     self.assertDTypeEqual(loss, float_dtype)
     self.assertDTypeEqual(alpha, float_dtype)
     self.assertDTypeEqual(scale, float_dtype)
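
A compressed sketch of the same dtype-preservation property outside the test (import path assumed, data hypothetical): the precision of the returned loss, alpha, and scale follows the precision of the input.

    import numpy as np
    import tensorflow as tf
    from robust_loss import adaptive  # assumed import path

    x64 = tf.constant(np.random.randn(100, 8))   # NumPy defaults to float64
    loss, alpha, scale = adaptive.lossfun(x64)
    # The test above asserts that loss, alpha, and scale are all float64 here,
    # and float32 when the input is float32.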
Example #4
    def testFittingToyNdMixedDataIsCorrect(self, float_dtype):
        """Tests that minimizing the adaptive loss recovers the true model.

    Here we generate a 2D array of samples drawn from a mix of scaled and
    shifted Cauchy and Normal distributions. We then minimize our loss with
    respect to the mean, scale, and shape of each distribution, and check that
    after minimization the shape parameter is near-zero for the Cauchy data and
    near 2 for the Normal data, and that the estimated means and scales are
    accurate.

    Args:
      float_dtype: The type (np.float32 or np.float64) of data to test.
    """
        samples, mu_true, alpha_true, scale_true = self._sample_nd_mixed_data(
            100, 8, float_dtype)
        mu = tf.Variable(tf.zeros(tf.shape(samples)[1], float_dtype))
        x = samples - mu[tf.newaxis, :]
        losses, alpha, scale = adaptive.lossfun(x)
        loss = tf.reduce_mean(losses)

        with self.session() as sess:
            init_rate = 1.
            final_rate = 0.1
            num_iters = 201
            global_step = tf.Variable(0, trainable=False)
            t = tf.cast(global_step, tf.float32) / (num_iters - 1)
            rate = tf.math.exp(
                tf.math.log(init_rate) * (1. - t) +
                tf.math.log(final_rate) * t)
            optimizer = tf.train.AdamOptimizer(learning_rate=rate,
                                               beta1=0.5,
                                               beta2=0.9,
                                               epsilon=1e-08)
            step = optimizer.minimize(loss, global_step=global_step)
            sess.run(tf.global_variables_initializer())

            for _ in range(num_iters):
                _ = sess.run(step)
            mu, alpha, scale = sess.run([mu, alpha[0, :], scale[0, :]])

        for a, b in [(alpha, alpha_true), (scale, scale_true), (mu, mu_true)]:
            self.assertAllClose(a, b * np.ones_like(a), rtol=0.1, atol=0.1)
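
The learning-rate schedule used in this test interpolates between init_rate and final_rate in log space, which is the same as a geometric decay rate(t) = init_rate**(1 - t) * final_rate**t for t in [0, 1]. A standalone NumPy sketch of that schedule, using the constants from the test:

    import numpy as np

    init_rate, final_rate, num_iters = 1., 0.1, 201
    t = np.arange(num_iters) / (num_iters - 1.)
    rate = np.exp(np.log(init_rate) * (1. - t) + np.log(final_rate) * t)
    # rate[0] == 1.0, rate[-1] == 0.1, and the midpoint rate[100] == sqrt(0.1) ≈ 0.316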