def _log_normalizer(self):
    # Multivariate Student-t log normalizer:
    # lgamma(df/2) - lgamma((df + D)/2) + D/2 * log(df * pi) + 1/2 * log|Sigma|,
    # where triangular_logdet(scale_tril) = 1/2 * log|Sigma|.
    D = self.loc.shape[-1]
    return torch.lgamma(.5 * self.df) - torch.lgamma(.5 * (self.df + D)) \
        + .5 * D * torch.log(math.pi * self.df) + triangular_logdet(self.scale_tril)
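
# Minimal standalone sketch (not part of the class above; names are local): for
# D = 1 the multivariate Student-t collapses to torch.distributions.StudentT, so
# the log normalizer can be sanity-checked via
#   log p(x) = -(df + D)/2 * log(1 + maha / df) - log_normalizer.
import math
import torch

df, loc, scale = torch.tensor(5.), torch.tensor(1.), torch.tensor(2.)
D = 1
log_normalizer = (torch.lgamma(.5 * df) - torch.lgamma(.5 * (df + D))
                  + .5 * D * torch.log(math.pi * df) + scale.log())

x = torch.tensor(.3)
maha = ((x - loc) / scale) ** 2
lhs = -.5 * (df + D) * torch.log1p(maha / df) - log_normalizer
rhs = torch.distributions.StudentT(df, loc, scale).log_prob(x)
assert torch.allclose(lhs, rhs)
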
def entropy(self) -> torch.Tensor:
    # Differential entropy of a multivariate Gaussian:
    # D/2 * (1 + log 2*pi) + 1/2 * log|Sigma|.
    logdet = triangular_logdet(self.scale_tril)
    return .5 * self.event_shape[0] * (1. + _LOG_2PI) + logdet
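
# Minimal standalone sketch (local names): the closed form
# D/2 * (1 + log 2*pi) + log|L| should agree with
# torch.distributions.MultivariateNormal.entropy for the same scale_tril L.
import math
import torch

_LOG_2PI = math.log(2. * math.pi)

D = 3
L = torch.randn(D, D).tril()
L.diagonal().abs_().add_(1.)           # make a valid Cholesky factor
logdet = L.diagonal().log().sum()      # log|L| = 1/2 * log|Sigma|

closed_form = .5 * D * (1. + _LOG_2PI) + logdet
reference = torch.distributions.MultivariateNormal(torch.zeros(D), scale_tril=L).entropy()
assert torch.allclose(closed_form, reference)
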
def log_normalizer(self):
    # Wishart log partition function:
    # log Gamma_D(concentration) + concentration * (D * log 2 + log|V|),
    # where log|V| = 2 * triangular_logdet(scale_tril).
    D = self.event_shape[-1]
    return torch.mvlgamma(self.concentration, D) + self.concentration \
        * (D * math.log(2.) + 2. * util.triangular_logdet(self.scale_tril))
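
# Minimal standalone sketch (local names; assumes concentration = df / 2 and
# scale_tril = cholesky(V)): the quantity above is the Wishart log partition
# function, so
#   log p(X) = (df - D - 1)/2 * log|X| - 1/2 * tr(V^-1 X) - log_normalizer
# should match torch.distributions.Wishart.log_prob.
import math
import torch

D, df = 3, 7.
V = torch.eye(D) + .1                  # positive-definite scale matrix
scale_tril = torch.linalg.cholesky(V)
concentration = torch.tensor(df / 2.)

log_normalizer = torch.mvlgamma(concentration, D) + concentration \
    * (D * math.log(2.) + 2. * scale_tril.diagonal().log().sum())

wishart = torch.distributions.Wishart(df=torch.tensor(df), scale_tril=scale_tril)
X = wishart.sample()
lhs = .5 * (df - D - 1.) * torch.logdet(X) \
    - .5 * torch.linalg.solve(V, X).diagonal().sum() - log_normalizer
assert torch.allclose(lhs, wishart.log_prob(X), atol=1e-3)
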
def _log_normalizer(self, nat_param1, nat_param2):
    # Gaussian log partition in natural parameters, with Lambda = -2 * nat_param2:
    # 1/2 * eta1' Lambda^-1 eta1 - 1/2 * log|Lambda| + D/2 * log 2*pi.
    maha, prec_tril = mahalanobis(-2. * nat_param2, nat_param1)
    logdet = triangular_logdet(prec_tril)
    return .5 * maha - logdet + .5 * self.event_shape[0] * _LOG_2PI
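
# Minimal standalone sketch (local names): with natural parameters
# eta1 = precision @ mean and eta2 = -precision / 2, the Gaussian log density is
#   log p(x) = eta1 @ x + x @ eta2 @ x - log_normalizer,
# which can be checked against torch.distributions.MultivariateNormal.
import math
import torch

_LOG_2PI = math.log(2. * math.pi)

D = 2
mean = torch.randn(D)
W = torch.randn(D, D)
precision = W @ W.T + D * torch.eye(D)     # symmetric positive-definite precision
eta1, eta2 = precision @ mean, -.5 * precision

prec_tril = torch.linalg.cholesky(precision)
half_solve = torch.linalg.solve_triangular(prec_tril, eta1.unsqueeze(-1), upper=False)
maha = half_solve.square().sum()           # eta1' Lambda^-1 eta1 = mean' Lambda mean
log_normalizer = .5 * maha - prec_tril.diagonal().log().sum() + .5 * D * _LOG_2PI

x = torch.randn(D)
lhs = eta1 @ x + x @ eta2 @ x - log_normalizer
rhs = torch.distributions.MultivariateNormal(mean, precision_matrix=precision).log_prob(x)
assert torch.allclose(lhs, rhs, atol=1e-4)
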
def expected_logdet(self):
    # Expected log-determinant of a Wishart variable:
    # mvdigamma(concentration, D) + D * log 2 + log|V|.
    D = self.event_shape[-1]
    return mvdigamma(self.concentration, D) + D * math.log(2.) \
        + 2. * util.triangular_logdet(self.scale_tril)
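
# Minimal standalone sketch (local names; mvdigamma is defined here because
# PyTorch has no built-in multivariate digamma): assuming concentration = df / 2
# and scale_tril = cholesky(V), the identity
#   E[log|X|] = psi_D(df/2) + D * log 2 + log|V|   for X ~ Wishart(df, V)
# can be checked by Monte Carlo against torch.distributions.Wishart.
import math
import torch

def mvdigamma(a, d):
    # multivariate digamma: sum_{i=0}^{d-1} digamma(a - i/2)
    return torch.digamma(a - .5 * torch.arange(d)).sum()

D, df = 3, 10.
V = 2. * torch.eye(D)
scale_tril = torch.linalg.cholesky(V)
concentration = torch.tensor(df / 2.)

expected = mvdigamma(concentration, D) + D * math.log(2.) \
    + 2. * scale_tril.diagonal().log().sum()

samples = torch.distributions.Wishart(df=torch.tensor(df), scale_tril=scale_tril).sample((20000,))
mc_estimate = torch.logdet(samples).mean()
print(expected.item(), mc_estimate.item())   # the two values should be close
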