def expected_stats(self):
    """Expected sufficient statistics of the conjugate (Normal-Wishart-like)
    posterior in natural-parameter form.

    Returns:
        tuple: ``(stats1, stats2, log_norm)`` where ``stats2`` is the
        (negated, scaled) expected precision matrix, ``stats1`` the matching
        linear term, and ``log_norm`` the expected log-normalizer.
    """
    mean, nu, a, B = self.to_standard()
    dim = mean.shape[-1]
    maha_dist, scale_tril = mahalanobis(B, mean)
    # log|B| from its Cholesky factor.
    logdet_scale = 2. * triangular_logdet(scale_tril)
    # NOTE(review): 'cholseky_inverse' looks like a misspelling of
    # 'cholesky_inverse' — confirm against the helper module's actual name.
    stats2 = -.5 * a[..., None, None] * cholseky_inverse(scale_tril)
    stats1 = -2. * matvec(stats2, mean)
    log_norm = .5 * (a * maha_dist + dim / nu + mvdigamma(a, dim) - logdet_scale)
    return stats1, stats2, log_norm
Example #2
0
 def _log_normalizer(self):
     """Log-normalizer of the (multivariate) Student-t density.

     Combines the gamma-function ratio in the degrees of freedom with the
     log-determinant of the Cholesky scale factor.
     """
     half_df = .5 * self.df
     dim = self.loc.shape[-1]
     # NOTE(review): the .5*log(pi*df) term carries no `dim` factor here —
     # presumably intentional for this parameterization; verify against the
     # class's density formula.
     return (torch.lgamma(half_df)
             - torch.lgamma(half_df + .5 * dim)
             + .5 * torch.log(math.pi * self.df)
             + triangular_logdet(self.scale_tril))
Example #3
0
 def log_normalizer(self):
     """Log-normalizer of a Wishart-type density.

     Uses the multivariate log-gamma of the concentration plus the
     concentration-weighted log-determinant of the scale matrix
     (recovered from its Cholesky factor).
     """
     dim = self.event_shape[-1]
     logdet_scale = LA.triangular_logdet(self.scale_tril)
     return (torch.mvlgamma(self.concentration, dim)
             + self.concentration * (dim * math.log(2.) + 2. * logdet_scale))
Example #4
0
 def expected_logdet(self):
     """Expectation of log|X| under a Wishart-type distribution.

     Sum of the multivariate digamma of the concentration, a dimension-
     dependent log(2) term, and the log-determinant of the scale matrix
     (2x the triangular log-determinant of its Cholesky factor).
     """
     dim = self.event_shape[-1]
     two_logdet_scale = 2. * LA.triangular_logdet(self.scale_tril)
     return mvdigamma(self.concentration, dim) + dim * math.log(2.) + two_logdet_scale
Example #5
0
 def entropy(self) -> torch.Tensor:
     """Differential entropy of the multivariate Gaussian.

     Computed as .5 * D * (1 + log(2*pi)) plus the log-determinant of the
     Cholesky factor of the covariance.
     """
     dim = self.event_shape[0]
     half_const = .5 * dim * (1. + _LOG_2PI)
     return half_const + triangular_logdet(self.scale_tril)
Example #6
0
 def _log_normalizer(self, nat_param1, nat_param2):
     """Log-normalizer of a Gaussian given its natural parameters.

     Args:
         nat_param1: linear natural parameter (precision times mean).
         nat_param2: quadratic natural parameter (-.5 * precision).

     The precision matrix is rebuilt as -2 * nat_param2; `mahalanobis`
     returns both the quadratic term and the precision's Cholesky factor.
     """
     maha, prec_tril = mahalanobis(-2. * nat_param2, nat_param1)
     dim = self.event_shape[0]
     return .5 * (maha + dim * _LOG_2PI) - triangular_logdet(prec_tril)