def entropy(self):
    """Differential entropy of the vMF distribution.

    Uses the exact Bessel-function ratio ive(m/2, k) / ive(m/2 - 1, k);
    approximate ratios (ive_fraction_approx / ive_fraction_approx2) exist
    as alternatives but are not used here.
    """
    half_dim = self.__m / 2
    bessel_ratio = ive(half_dim, self.scale) / ive(half_dim - 1, self.scale)
    neg_kappa_term = -self.scale * bessel_ratio
    # Drop the trailing singleton axis, then add the log normalizer.
    return neg_kappa_term.view(*(neg_kappa_term.shape[:-1])) + self._log_normalization()
def _log_normalization(self):
    """Negative log of the vMF normalization constant (TensorFlow ops version).

    Computes -log C_m(kappa) using the exponentially scaled Bessel
    function `ive` for numerical stability, then strips the last axis.
    """
    half_m = self.__mf / 2
    log_const = -(
        (half_m - 1) * math_ops.log(self.scale)
        - half_m * math.log(2 * math.pi)
        - (self.scale + math_ops.log(ive(half_m - 1, self.scale))))
    target_shape = ops.convert_to_tensor(array_ops.shape(log_const)[:-1])
    return array_ops.reshape(log_const, target_shape)
def mean(self):
    """Mean of the vMF distribution: loc scaled by the Bessel ratio I_{m/2}/I_{m/2-1}."""
    half_dim = self.__m / 2
    bessel_ratio = ive(half_dim, self.scale) / ive(half_dim - 1, self.scale)
    return self.loc * bessel_ratio
def _log_normalization(self):
    """Negative log of the vMF normalization constant (PyTorch version).

    Uses the exponentially scaled Bessel function `ive`, so the
    `self.scale` term inside the parentheses restores the scaling.
    """
    half_dim = self.__m / 2
    log_const = -(
        (half_dim - 1) * torch.log(self.scale)
        - half_dim * math.log(2 * math.pi)
        - (self.scale + torch.log(ive(half_dim - 1, self.scale))))
    # Drop the trailing singleton axis.
    return log_const.view(*(log_const.shape[:-1]))
def entropy(self):
    """Differential entropy of the vMF distribution (PyTorch version)."""
    half_dim = self.__m / 2
    bessel_ratio = ive(half_dim, self.scale) / ive(half_dim - 1, self.scale)
    neg_kappa_term = -self.scale * bessel_ratio
    # Drop the trailing singleton axis, then add the log normalizer.
    return neg_kappa_term.view(*(neg_kappa_term.shape[:-1])) + self._log_normalization()
def _mean(self):
    """Mean of the vMF distribution (TensorFlow version): _loc times the Bessel ratio."""
    half_m = self.__mf / 2
    bessel_ratio = ive(half_m, self.scale) / ive(half_m - 1, self.scale)
    return self._loc * bessel_ratio
def _entropy(self):
    """Differential entropy of the vMF distribution (TensorFlow ops version)."""
    half_m = self.__mf / 2
    kappa_term = self.scale * ive(half_m, self.scale) / ive(half_m - 1, self.scale)
    # Reshape to the batch shape of `scale` (last axis dropped).
    batch_shape = ops.convert_to_tensor(array_ops.shape(self.scale)[:-1])
    return -array_ops.reshape(kappa_term, batch_shape) + self._log_normalization()
def _kl_vmf_vmf(vmf1, vmf2):
    """KL divergence KL(vmf1 || vmf2) between two vMF distributions.

    Computed as H(vmf1) minus the log-normalizer terms of vmf2.

    NOTE(review): this appears to omit the cross term
    kappa_2 * mu_2^T E_{vmf1}[x] of E_{vmf1}[log vmf2(x)] — confirm this
    is intended (e.g. that vmf2 is effectively direction-free here).
    """
    half_dim2 = vmf2.dim * 0.5
    log_norm2 = (
        (half_dim2 - 1) * torch.log(vmf2.scale)
        - half_dim2 * math.log(2 * math.pi)
        - torch.log(ive(vmf2.dim / 2 - 1, vmf2.scale)))
    return vmf1.entropy() - log_norm2
def _log_normalization(self):
    """Negative log of the vMF normalization constant, returned flattened to 1-D.

    Same formula as the view-based variant, but the result is reshaped to
    a single flat axis of length equal to the total element count.
    """
    half_dim = self.__m / 2
    log_const = -(
        (half_dim - 1) * torch.log(self.scale)
        - half_dim * math.log(2 * math.pi)
        - (self.scale + torch.log(ive(half_dim - 1, self.scale))))
    # Flatten to one dimension (total element count).
    return log_const.view(log_const.numel())