Code example #1
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp

from robust_loss import util  # assumed import path for the module under test


# Method of a tf.test.TestCase subclass (assertAllClose comes from there).
def testStudentsTNllAgainstTfp(self):
    """Check that our Student's T NLL matches TensorFlow Probability."""
    for _ in range(10):
        # Random residual and random positive df/scale parameters.
        x = np.random.normal()
        df = np.exp(4. * np.random.normal())
        scale = np.exp(4. * np.random.normal())
        nll = util.students_t_nll(x, df, scale)
        # TFP's StudentT log-density is the reference.
        nll_true = -tfp.distributions.StudentT(
            df=df, loc=tf.zeros_like(scale), scale=scale).log_prob(x)
        self.assertAllClose(nll, nll_true, atol=1e-4, rtol=1e-4)
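
For reference, the quantity being tested is the closed-form negative log-likelihood of a zero-mean Student's t-distribution. Below is a minimal NumPy/SciPy sketch of that formula; it is an assumption about what util.students_t_nll computes, not the library's actual implementation:

import numpy as np
from scipy.special import gammaln

def students_t_nll_sketch(x, df, scale):
    # NLL of a zero-mean Student's t: -log p(x; df, loc=0, scale).
    z = x / scale
    return (0.5 * (df + 1.) * np.log1p(z**2 / df)         # data term
            + 0.5 * np.log(df * np.pi)                    # normalizer
            + gammaln(0.5 * df) - gammaln(0.5 * (df + 1.))
            + np.log(scale))                              # change of scale

For scalar inputs this should agree with -tfp.distributions.StudentT(df=df, loc=0., scale=scale).log_prob(x) to within the test's tolerances.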
Code example #2
import torch

from robust_loss_pytorch import util  # assumed import path; provides students_t_nll


# Method of the adaptive-loss class; self.df() and self.scale() return the
# distribution's current degrees-of-freedom and scale parameters.
def lossfun(self, x):
    """A variant of lossfun() that uses the NLL of a Student's t-distribution.

    Args:
      x: The residual for which the loss is being computed. Must be a rank-2
        tensor, where the first dimension is the batch index and the second
        dimension must equal self.num_dims. Must be a tensor or numpy array
        of type self.float_dtype.

    Returns:
      A tensor of the same type and shape as input `x`, containing the loss at
      each element of `x`. These "losses" are actually negative log-likelihoods
      (as produced by distribution.nllfun()) and so they are not actually
      bounded from below by zero. You'll probably want to minimize their sum or
      mean.
    """
    x = torch.as_tensor(x)
    assert len(x.shape) == 2
    assert x.shape[1] == self.num_dims
    assert x.dtype == self.float_dtype
    return util.students_t_nll(x, self.df(), self.scale())
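
As a hypothetical usage sketch of the contract described in the docstring, the same elementwise NLL can be reproduced with the stock torch.distributions.StudentT API; the shapes chosen for df and scale below, one positive value per dimension, are assumptions for illustration:

import torch

# Rank-2 residual: (batch, num_dims), as lossfun() requires.
x = torch.randn(8, 3)
df = torch.exp(torch.randn(3))     # assumed: one positive df per dimension
scale = torch.exp(torch.randn(3))  # assumed: one positive scale per dimension

# Elementwise NLL of a zero-mean Student's t; same shape as x.
nll = -torch.distributions.StudentT(
    df, loc=torch.zeros_like(scale), scale=scale).log_prob(x)
assert nll.shape == x.shape

loss = nll.mean()  # the docstring suggests minimizing the sum or mean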