    def entropy(self):
        # Differential entropy of the von Mises-Fisher distribution:
        # H = -kappa * I_{m/2}(kappa) / I_{m/2-1}(kappa) - log C_m(kappa).
        # option 1: exact Bessel-function ratio via the exponentially scaled ive
        output = (-self.scale * ive(self.__m / 2, self.scale) / ive(
            (self.__m / 2) - 1, self.scale))
        # option 2: approximate the same ratio
        # output = - self.scale * ive_fraction_approx(torch.tensor(self.__m / 2), self.scale)
        # option 3: alternative approximation of the ratio
        # output = - self.scale * ive_fraction_approx2(torch.tensor(self.__m / 2), self.scale)

        # Drop the trailing dimension and add the negative log normalizer.
        return output.view(*(output.shape[:-1])) + self._log_normalization()
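For reference, the closed form these entropy snippets implement for an m-dimensional von Mises-Fisher distribution with concentration kappa is

H(q) \;=\; -\kappa\,\frac{I_{m/2}(\kappa)}{I_{m/2-1}(\kappa)} \;-\; \log C_m(\kappa),

where I_\nu is the modified Bessel function of the first kind and C_m(\kappa) is the vMF normalizing constant. Because ive(\nu, \kappa) = e^{-\kappa} I_\nu(\kappa), the exponential scaling cancels in the ratio, so option 1 can feed ive values into the ratio directly; the _log_normalization() term supplies -\log C_m(\kappa).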
Example #2
    def _log_normalization(self):
        # Negative log normalizing constant of the vMF density, with the
        # Bessel term written as scale + log(ive(...)) for numerical stability.
        output = -(
            (self.__mf / 2 - 1) * math_ops.log(self.scale) -
            (self.__mf / 2) * math.log(2 * math.pi) -
            (self.scale + math_ops.log(ive(self.__mf / 2 - 1, self.scale))))

        return array_ops.reshape(
            output, ops.convert_to_tensor(array_ops.shape(output)[:-1]))
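The expression above is the negative log normalizing constant of the vMF density. Writing it out,

\log C_m(\kappa) \;=\; \left(\tfrac{m}{2}-1\right)\log\kappa \;-\; \tfrac{m}{2}\log(2\pi) \;-\; \log I_{m/2-1}(\kappa),
\qquad
\log I_{m/2-1}(\kappa) \;=\; \kappa + \log \mathrm{ive}\!\left(\tfrac{m}{2}-1,\,\kappa\right),

which is why the code adds self.scale to the log of the exponentially scaled Bessel function instead of evaluating the unscaled I_{m/2-1}, which overflows for large concentrations.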
    def mean(self):
        # Mean of the distribution: loc scaled by the Bessel-function ratio.
        return self.loc * (ive(self.__m / 2, self.scale) /
                           ive(self.__m / 2 - 1, self.scale))
    def _log_normalization(self):
        output = -((self.__m / 2 - 1) * torch.log(self.scale) -
                   (self.__m / 2) * math.log(2 * math.pi) -
                   (self.scale + torch.log(ive(self.__m / 2 - 1, self.scale))))

        return output.view(*(output.shape[:-1]))
    def entropy(self):
        output = -self.scale * ive(self.__m / 2, self.scale) / ive(
            (self.__m / 2) - 1, self.scale)

        return output.view(*(output.shape[:-1])) + self._log_normalization()
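As a quick standalone sanity check of option 1's use of ive for the Bessel ratio, the sketch below assumes the ive wrapper in these snippets matches scipy.special.ive (the exponentially scaled modified Bessel function of the first kind); the e^{-kappa} factor cancels in the ratio, so ive and the unscaled iv agree:

from scipy.special import iv, ive

m, kappa = 3, 5.0
scaled = ive(m / 2, kappa) / ive(m / 2 - 1, kappa)    # ratio as used in the snippets above
unscaled = iv(m / 2, kappa) / iv(m / 2 - 1, kappa)    # ratio of the unscaled Bessel functions
print(scaled, unscaled)  # equal up to floating-point error; ive avoids overflow for large kappa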
Example #6
    def _mean(self):
        # Mean direction scaled by the ratio I_{m/2}(kappa) / I_{m/2-1}(kappa).
        return self._loc * (ive(self.__mf / 2, self.scale) /
                            ive(self.__mf / 2 - 1, self.scale))
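The mean these mean/_mean methods return is the standard vMF expectation: the mean direction mu (loc) shrunk toward the origin by the Bessel-function ratio,

\mathbb{E}[X] \;=\; \frac{I_{m/2}(\kappa)}{I_{m/2-1}(\kappa)}\,\mu,

whose norm is below 1, tending to 1 as \kappa \to \infty and to 0 as \kappa \to 0 (the uniform case).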
Example #7
    def _entropy(self):
        # Entropy: -kappa * I_{m/2}(kappa)/I_{m/2-1}(kappa) plus _log_normalization(),
        # with the trailing dimension removed by the reshape.
        return -array_ops.reshape(
            self.scale * ive(self.__mf / 2, self.scale) / ive(
                (self.__mf / 2) - 1, self.scale),
            ops.convert_to_tensor(array_ops.shape(
                self.scale)[:-1])) + self._log_normalization()
Example #8
def _kl_vmf_vmf(vmf1, vmf2):
    # KL divergence KL(vmf1 || vmf2) between two von Mises-Fisher distributions,
    # written as vmf1's entropy minus a term built from vmf2's log normalizer.
    rel_entropy = ((vmf2.dim * 0.5) - 1) * torch.log(
        vmf2.scale) - (vmf2.dim * 0.5) * math.log(2 * math.pi) - torch.log(
            ive(vmf2.dim / 2 - 1, vmf2.scale))
    return vmf1.entropy() - rel_entropy
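A minimal, hypothetical wiring sketch (not from the source repository) of how a function with this _kl_vmf_vmf signature is typically registered with torch.distributions so that kl_divergence(p, q) dispatches to it. The VonMisesFisher stub, its constructor, and the use of scipy.special.ive in place of the repository's ive wrapper are assumptions made only to keep the sketch self-contained:

import math
import torch
from scipy.special import ive
from torch.distributions import Distribution
from torch.distributions.kl import register_kl, kl_divergence


class VonMisesFisher(Distribution):
    # Hypothetical stub: only the pieces _kl_vmf_vmf reads (.dim, .scale, .entropy()).
    arg_constraints = {}

    def __init__(self, dim, kappa):
        self.dim = dim
        self.scale = torch.tensor([float(kappa)])
        super().__init__(batch_shape=self.scale.shape, validate_args=False)

    def entropy(self):
        # Same closed form as the snippets above, evaluated with plain floats.
        m, k = self.dim, float(self.scale)
        ratio = ive(m / 2, k) / ive(m / 2 - 1, k)
        log_norm = -((m / 2 - 1) * math.log(k) - (m / 2) * math.log(2 * math.pi)
                     - (k + math.log(ive(m / 2 - 1, k))))
        return torch.tensor([-k * ratio + log_norm])


@register_kl(VonMisesFisher, VonMisesFisher)
def _kl_vmf_vmf(vmf1, vmf2):
    # Same expression as the snippet above, with scipy's ive standing in.
    k2 = float(vmf2.scale)
    rel_entropy = ((vmf2.dim * 0.5) - 1) * math.log(k2) \
        - (vmf2.dim * 0.5) * math.log(2 * math.pi) \
        - math.log(ive(vmf2.dim / 2 - 1, k2))
    return vmf1.entropy() - rel_entropy


p = VonMisesFisher(dim=3, kappa=2.0)
q = VonMisesFisher(dim=3, kappa=1.0)
print(kl_divergence(p, q))  # dispatches to _kl_vmf_vmf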
Example #9
    def _log_normalization(self):
        # -log C_m(kappa): the leading minus must apply to the whole bracket,
        # as in the other examples; scale + log(ive) recovers log I_{m/2-1}(kappa).
        output = -((self.__m / 2 - 1) * torch.log(self.scale)
                   - (self.__m / 2) * math.log(2 * math.pi)
                   - (self.scale + torch.log(ive(self.__m / 2 - 1, self.scale))))
        # Flatten to a 1-D tensor.
        return output.view(*([output.flatten().shape[0]]))