Code example #1
  def testFittingToyNdMixedDataIsCorrect(self, float_dtype):
    """Tests that minimizing the adaptive loss recovers the true model.

    Here we generate a 2D array of samples drawn from a mix of scaled and
    shifted Cauchy and Normal distributions. We then minimize our loss with
    respect to the mean, scale, and shape of each distribution, and check that
    after minimization the shape parameter is near-zero for the Cauchy data and
    near 2 for the Normal data, and that the estimated means and scales are
    accurate.

    Args:
      float_dtype: The type (np.float32 or np.float64) of data to test.
    """
    num_dims = 8
    samples, mu_true, alpha_true, scale_true = self._sample_nd_mixed_data(
        100, num_dims, float_dtype)
    mu = torch.tensor(
        np.zeros(samples.shape[1], float_dtype), requires_grad=True)

    adaptive_lossfun = adaptive.AdaptiveLossFunction(num_dims, float_dtype)
    params = torch.nn.ParameterList(adaptive_lossfun.parameters())
    optimizer = torch.optim.Adam([p for p in params] + [mu], lr=0.1)
    for _ in range(1000):
      optimizer.zero_grad()
      x = torch.as_tensor(samples) - mu[np.newaxis, :]
      loss = torch.sum(adaptive_lossfun.lossfun(x))
      loss.backward(retain_graph=True)
      optimizer.step()

    mu = mu.detach().numpy()
    alpha = adaptive_lossfun.alpha()[0, :].detach().numpy()
    scale = adaptive_lossfun.scale()[0, :].detach().numpy()
    for a, b in [(alpha, alpha_true), (scale, scale_true), (mu, mu_true)]:
      np.testing.assert_allclose(a, b * np.ones_like(a), rtol=0.1, atol=0.1)
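The helper self._sample_nd_mixed_data is defined elsewhere in adaptive_test.py and is not shown on this page. As a rough, illustrative sketch only (the function name, ranges, and even/odd split below are assumptions, not the project's code), a sampler with the same contract as the docstring describes might look like this:

    import numpy as np

    def sample_nd_mixed_data(n, d, float_dtype):
        """Illustrative stand-in: returns (samples, mu_true, alpha_true, scale_true).

        Alternates per dimension between Cauchy data (true alpha = 0) and Normal
        data (true alpha = 2), each shifted by a random mean and multiplied by a
        random scale.
        """
        mu_true = np.random.normal(size=d)
        scale_true = np.random.uniform(0.5, 1.5, size=d)
        alpha_true = np.where(np.arange(d) % 2 == 0, 0.0, 2.0)
        samples = np.empty((n, d))
        for i in range(d):
            if alpha_true[i] == 0.0:
                samples[:, i] = np.random.standard_cauchy(n)
            else:
                samples[:, i] = np.random.normal(size=n)
            samples[:, i] = samples[:, i] * scale_true[i] + mu_true[i]
        cast = lambda x: x.astype(float_dtype)
        return cast(samples), cast(mu_true), cast(alpha_true), cast(scale_true)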
Code example #2
 def testInitialAlphaAndScaleAreCorrect(self, float_dtype):
   """Tests that `alpha` and `scale` are initialized as expected."""
   for i in range(8):
     # Generate random ranges for alpha and scale.
     alpha_lo = float_dtype(np.random.uniform())
     alpha_hi = float_dtype(np.random.uniform() + 1.)
     # Half of the time pick a random initialization for alpha, the other half
     # use the default value.
     if i % 2 == 0:
       alpha_init = float_dtype(alpha_lo + np.random.uniform() *
                                (alpha_hi - alpha_lo))
       true_alpha_init = alpha_init
     else:
       alpha_init = None
       true_alpha_init = (alpha_lo + alpha_hi) / 2.
     scale_init = float_dtype(np.random.uniform() + 0.5)
     scale_lo = float_dtype(np.random.uniform() * 0.1)
     adaptive_lossfun = adaptive.AdaptiveLossFunction(
         10,
         float_dtype,
         alpha_lo=alpha_lo,
         alpha_hi=alpha_hi,
         alpha_init=alpha_init,
         scale_lo=scale_lo,
         scale_init=scale_init)
     alpha = adaptive_lossfun.alpha().detach().numpy()
     scale = adaptive_lossfun.scale().detach().numpy()
     np.testing.assert_allclose(alpha, true_alpha_init * np.ones_like(alpha))
     np.testing.assert_allclose(scale, scale_init * np.ones_like(scale))
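The fallback true_alpha_init = (alpha_lo + alpha_hi) / 2. above is consistent with alpha being stored internally as an unconstrained latent variable that is squashed into (alpha_lo, alpha_hi) by a sigmoid, so a zero-initialized latent lands at the midpoint of the range. The following is only a sketch of that assumed parameterization, not the library's actual code:

    import torch

    def constrained_alpha(latent, alpha_lo, alpha_hi):
        # Maps an unconstrained latent value into the open interval (alpha_lo, alpha_hi).
        return alpha_lo + (alpha_hi - alpha_lo) * torch.sigmoid(latent)

    # A latent of zero gives the midpoint, matching the default alpha_init above.
    assert torch.isclose(
        constrained_alpha(torch.tensor(0.0), 1.0, 2.0), torch.tensor(1.5))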
Code example #3
File: adaptive_test.py  Project: vnesh-san/nmpevqvae
    def testFixedAlphaAndScaleAreCorrect(self, float_dtype, device_string):
        """Tests that fixed alphas and scales do not change during optimization)."""
        device = _get_device(device_string)
        alpha_lo = np.random.uniform() * 2.0
        alpha_hi = alpha_lo
        scale_init = float_dtype(np.random.uniform() + 0.5)
        scale_lo = scale_init
        num_dims = 10
        # With alpha and scale fixed, the loss should expose nothing for an optimizer to train.
        adaptive_lossfun = adaptive.AdaptiveLossFunction(
            num_dims,
            float_dtype,
            device,
            alpha_lo=alpha_lo,
            alpha_hi=alpha_hi,
            scale_lo=scale_lo,
            scale_init=scale_init,
        )

        params = torch.nn.ParameterList(adaptive_lossfun.parameters())
        assert len(params) == 0
        alpha = adaptive_lossfun.alpha().cpu().detach()
        scale = adaptive_lossfun.scale().cpu().detach()
        alpha_init = (alpha_lo + alpha_hi) / 2.0
        np.testing.assert_allclose(alpha, alpha_init * np.ones_like(alpha))
        np.testing.assert_allclose(scale, scale_init * np.ones_like(scale))
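As the assertion len(params) == 0 shows, setting alpha_lo == alpha_hi and scale_lo == scale_init fixes both values, so the loss exposes no trainable parameters and an optimizer must be given something else to update (in example #1 that role is played by mu). A minimal sketch of the contrast, assuming adaptive can be imported as in Barron's package (from robust_loss_pytorch import adaptive) and that device may be omitted as in examples #1, #2, and #4:

    import numpy as np
    from robust_loss_pytorch import adaptive  # assumed import path

    # Fixed alpha and scale: no latent parameters are created.
    fixed_lossfun = adaptive.AdaptiveLossFunction(
        10, np.float32, alpha_lo=1.0, alpha_hi=1.0, scale_lo=0.7, scale_init=0.7)
    assert len(list(fixed_lossfun.parameters())) == 0

    # Default ranges: latent alpha and scale are trainable.
    free_lossfun = adaptive.AdaptiveLossFunction(10, np.float32)
    assert len(list(free_lossfun.parameters())) > 0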
Code example #4
 def testLossfunPreservesDtype(self, float_dtype):
   """Checks the loss's outputs have the same precisions as its input."""
   num_dims = 8
   samples, _, _, _ = self._sample_nd_mixed_data(100, num_dims, float_dtype)
   adaptive_lossfun = adaptive.AdaptiveLossFunction(num_dims, float_dtype)
   loss = adaptive_lossfun.lossfun(samples).detach().numpy()
   alpha = adaptive_lossfun.alpha().detach().numpy()
   scale = adaptive_lossfun.scale().detach().numpy()
   np.testing.assert_equal(loss.dtype, float_dtype)
   np.testing.assert_equal(alpha.dtype, float_dtype)
   np.testing.assert_equal(scale.dtype, float_dtype)
Code example #5
File: adaptive_test.py  Project: vnesh-san/nmpevqvae
 def testLossfunPreservesDtype(self, float_dtype, device_string):
     """Checks the loss's outputs have the same precisions as its input."""
     device = _get_device(device_string)
     num_dims = 8
     samples, _, _, _ = self._sample_nd_mixed_data(100, num_dims,
                                                   float_dtype)
     adaptive_lossfun = adaptive.AdaptiveLossFunction(
         num_dims, float_dtype, device)
     loss = (adaptive_lossfun.lossfun(torch.tensor(
         samples, device=device)).cpu().detach().numpy())
     alpha = adaptive_lossfun.alpha().cpu().detach().numpy()
     scale = adaptive_lossfun.scale().cpu().detach().numpy()
     np.testing.assert_equal(loss.dtype, float_dtype)
     np.testing.assert_equal(alpha.dtype, float_dtype)
     np.testing.assert_equal(scale.dtype, float_dtype)
Code example #6
 def loss_func(preds, targets):
     # Note: because the AdaptiveLossFunction is constructed inside the function,
     # its latent alpha and scale are re-initialized on every call and never adapt.
     # `preds` and `targets` are assumed to be CUDA tensors of shape (batch, 1).
     adaptive_lossfun = adaptive.AdaptiveLossFunction(1, np.float32, 'cuda')
     d = torch.as_tensor(preds - targets)
     loss = torch.sum(adaptive_lossfun.lossfun(d))
     return loss
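Since loss_func above rebuilds the loss on every call, its alpha and scale never get a chance to adapt. A more typical pattern is to construct the loss once and optimize its latent parameters jointly with the model's. The following is only an illustrative sketch (the linear model, batch shapes, and 'cpu' device are assumptions; the constructor is called with the same three positional arguments as in example #6 and the import path is assumed):

    import numpy as np
    import torch
    from robust_loss_pytorch import adaptive  # assumed import path

    model = torch.nn.Linear(4, 1)
    # Built once; num_dims=1 matches a (batch, 1) residual as in example #6.
    adaptive_lossfun = adaptive.AdaptiveLossFunction(1, np.float32, 'cpu')
    optimizer = torch.optim.Adam(
        list(model.parameters()) + list(adaptive_lossfun.parameters()), lr=1e-3)

    x = torch.randn(32, 4)
    targets = torch.randn(32, 1)
    for _ in range(100):
        optimizer.zero_grad()
        residual = model(x) - targets  # shape (batch, num_dims)
        loss = torch.sum(adaptive_lossfun.lossfun(residual))
        loss.backward()
        optimizer.step()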