    def __init__(self, probability):
        """Initialize this distribution with probability.

        input:
        probability - Type: Float tensor
        """
        self.probability = VariableCast(probability)
    def __init__(self, mean, cov):
        """Initialize this distribution with mean, cov.

        input:
            mean: n by 1
            cov: covariance matrix, n by n
        """
        self.mean = VariableCast(mean)
        self.cov = VariableCast(cov)
        assert self.mean.data.size()[0] == self.cov.data.size()[0], \
            "ERROR! mean and cov have different size!"
        self.dim = self.mean.data.size()[0]
        self.chol_std = VariableCast(torch.potrf(
            self.cov.data).t())  # lower-triangular Cholesky factor
        self.chol_std_inv = torch.inverse(self.chol_std)
class Laplace(ContinuousRandomVariable):
    '''
    Laplace random variable

    Methods
    -------
    sample X ~ Laplace(location, scale)
    logpdf

    Attributes
    ----------
    location - Type torch.autograd.Variable, torch.Tensor, nparray
               Size \mathbb{R}^{1 x N}
    scale    - Type torch.autograd.Variable, torch.Tensor, nparray
               Size \mathbb{R}^{1 x N}
    '''
    def __init__(self, location, scale):
        self.location = VariableCast(location)
        self.scale = VariableCast(scale)

    def sample(self):
        # Inverse-CDF sampling, see https://en.wikipedia.org/wiki/Laplace_distribution
        uniforms = torch.Tensor(self.location.size()).uniform_() - 0.5
        uniforms = VariableCast(uniforms)
        return self.location - self.scale * torch.sign(uniforms) * \
                               torch.log(1 - 2 * torch.abs(uniforms))

    def logpdf(self, value):
        value = VariableCast(value)
        return -torch.div(torch.abs(value - self.location),
                          self.scale) - torch.log(2 * self.scale)
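
# A minimal usage sketch for Laplace (illustrative values; assumes
# VariableCast accepts plain floats, as elsewhere in this file):
#
#     laplace = Laplace(0.0, 1.0)
#     x = laplace.sample()      # X ~ Laplace(0, 1) via the inverse-CDF transform
#     lp = laplace.logpdf(x)    # -|x - location|/scale - log(2*scale)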
    def logpdf(self, value):
        """
        value : obs value, should be n by 1
        :return: scalar, log pdf value
        """
        value = VariableCast(value)
        cov_det = self.chol_std.diag().prod()**2
        log_norm_constant = 0.5 * self.dim * torch.log(torch.Tensor([2 * np.pi])) \
                            + 0.5 * torch.log(cov_det.data)
        right = torch.matmul(self.chol_std_inv, value - self.mean)
        log_p = -Variable(log_norm_constant) - 0.5 * torch.matmul(
            torch.t(right), right)
        return log_p
class MultivariateNormal(ContinuousRandomVariable):
    """Normal random variable"""
    def __init__(self, mean, cov):
        """Initialize this distribution with mean, cov.

       input:
           mean: n by 1
           cov: covariance matrix, n by n
       """
        self.mean = VariableCast(mean)
        self.cov = VariableCast(cov)
        assert self.mean.data.size()[0] == self.cov.data.size()[
            0]  #, "ERROR! mean and cov have different size!")
        self.dim = self.mean.data.size()[0]
        self.chol_std = VariableCast(torch.potrf(
            self.cov.data).t())  # lower triangle
        self.chol_std_inv = torch.inverse(self.chol_std)

    def sample(self, num_samples=1):
        # Draw z ~ N(0, I) and transform: x = mean + L z.
        # Note: num_samples is currently unused; one n-by-1 sample is returned.
        zs = torch.randn(self.dim, 1)
        return self.mean.data + torch.matmul(self.chol_std.data, zs)

    def logpdf(self, value):
        """
        value : obs value, should be n by 1
        :return: scalar, log pdf value
        """
        value = VariableCast(value)
        cov_det = self.chol_std.diag().prod()**2
        log_norm_constant = 0.5 * self.dim * torch.log(torch.Tensor([2 * np.pi])) \
                            + 0.5 * torch.log(cov_det.data)
        right = torch.matmul(self.chol_std_inv, value - self.mean)
        log_p = -Variable(log_norm_constant) - 0.5 * torch.matmul(
            torch.t(right), right)
        return log_p
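
# A minimal usage sketch for MultivariateNormal (illustrative values;
# assumes the old-PyTorch API used above, where torch.potrf returns the
# upper Cholesky factor of a positive-definite matrix):
#
#     mean = torch.Tensor([[0.0], [0.0]])            # 2 by 1
#     cov = torch.Tensor([[1.0, 0.3], [0.3, 1.0]])   # 2 by 2
#     mvn = MultivariateNormal(mean, cov)
#     x = mvn.sample()      # x = mean + L z, with z ~ N(0, I)
#     lp = mvn.logpdf(x)    # -0.5*(dim*log(2*pi) + log|cov|) - 0.5*(x-mu)' cov^{-1} (x-mu)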
    def sample_momentum(self, values):
        # Standard-normal momentum draw: one N(0, 1) component per latent value.
        return VariableCast(torch.randn(1, VariableCast(values).data.size()[0]))
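
# Usage sketch (hypothetical shapes): for a 3-dimensional latent state,
# sample_momentum returns a 1 by 3 row of independent N(0, 1) momenta,
# the kind of draw used for the kinetic-energy term in HMC-style samplers.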
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Author: Bradley Gram-Hansen
Time created:  08:59
Date created:  05/09/2017

License: MIT
'''
import torch
from Utils.core import VariableCast
from torch.autograd import Variable
import Distributions.distributions as dis

c24039 = VariableCast(1.0)
c24040 = VariableCast(2.0)
x24041 = dis.Normal(c24039, c24040)
x22542 = x24041.sample()  #sample
if isinstance(x22542, Variable):
    x22542 = Variable(x22542.data, requires_grad=True)
else:
    x22542 = Variable(VariableCast(x22542).data, requires_grad=True)
p24042 = x24041.logpdf(x22542)
c24043 = VariableCast(3.0)
x24044 = dis.Normal(x22542, c24043)
c24045 = VariableCast(7.0)
y22543 = c24045
p24046 = x24044.logpdf(y22543)
p24047 = Variable.add(p24042, p24046)

print(x22542)
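
# Follow-on sketch: p24047 is the joint log density log p(x) + log p(y=7 | x)
# for x ~ Normal(1, 2) and y ~ Normal(x, 3), evaluated at the sampled x22542.
# Because x22542 was rebuilt with requires_grad=True, autograd can supply the
# gradient a gradient-based sampler would need (assuming p24047 is a scalar
# Variable):
#
#     p24047.backward()
#     print(x22542.grad)   # d(log joint)/dx at x22542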
    def logpdf(self, value):
        value = VariableCast(value)
        # log N(value; mean, std^2) =
        #     -0.5*(value - mean)^2 / std^2 - log(std) - 0.5*log(2*pi)
        return (-0.5 * torch.pow(value - self.mean, 2) / self.std**2) \
               - torch.log(self.std) - 0.5 * np.log(2 * np.pi)
    def __init__(self, mean, std):
        self.mean = VariableCast(mean)
        self.std = VariableCast(std)
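
# A minimal usage sketch for these Normal fragments (illustrative values;
# assumes they belong to one Normal(ContinuousRandomVariable) class):
#
#     normal = Normal(0.0, 1.0)
#     lp = normal.logpdf(0.0)   # -0.5*log(2*pi) for the standard normal at 0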
    def sample(self):
        # Inverse-CDF sampling, see https://en.wikipedia.org/wiki/Laplace_distribution
        uniforms = torch.Tensor(self.location.size()).uniform_() - 0.5
        uniforms = VariableCast(uniforms)
        return self.location - self.scale * torch.sign(uniforms) * \
                               torch.log(1 - 2 * torch.abs(uniforms))

    def __init__(self, location, scale):
        self.location = VariableCast(location)
        self.scale = VariableCast(scale)
    def logpmf(self, value, epsilon=1e-10):
        value = VariableCast(value)
        assert value.size() == self.probability.size(), \
            "ERROR! value and probability have different size!"
        # Bernoulli log mass, with epsilon added for numerical stability:
        # value*log(p + eps) + (1 - value)*log(1 - p + eps)
        return torch.sum(
            value * torch.log(self.probability + epsilon) +
            (1 - value) * torch.log(1 - self.probability + epsilon))
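
# A minimal usage sketch for the Bernoulli fragments (illustrative values;
# the class name Bernoulli is assumed, matching the
# __init__(self, probability) at the top of this file):
#
#     bern = Bernoulli(torch.Tensor([0.3]))
#     lp1 = bern.logpmf(torch.Tensor([1.0]))   # ~ log(0.3)
#     lp0 = bern.logpmf(torch.Tensor([0.0]))   # ~ log(0.7)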


# ---------------------------------------------------------------------------------------
# Unused and maybe used in the future
# ---------------------------------------------------------------------------------------
# class MultivariateIndependentLaplace(ContinuousRandomVariable):
#     """MultivariateIndependentLaplace random variable"""
#     def __init__(self, location, scale):
#         """Initialize this distribution with location, scale.
#
#         input:
#             location: Tensor/Variable
#                 [ 1, ..., N]
#             scale: Tensor/Variable
#                 [1, ..., N]
#         """
#         self._location = location
#         self._scale = scale
#
#     def sample(self, batch_size, num_particles):
#         uniforms = torch.Tensor(self._location.size()).uniform_() - 0.5
#         if isinstance(self._location, Variable):
#             uniforms = Variable(uniforms)
#             return self._location.detach() - self._scale.detach() * \
#                 torch.sign(uniforms) * torch.log(1 - 2 * torch.abs(uniforms))
#         else:
#             return self._location - self._scale * torch.sign(uniforms) * \
#                 torch.log(1 - 2 * torch.abs(uniforms))
#     def sample(self,num_samples):
#         uniforms = torch.Tensor(self._location.size()).uniform_() - 0.5
#         # why the half that would make the scale between [-0.5,0.5]
#         if isinstance(self._location, Variable):
#             uniforms = Variable(uniforms)
#             return self._location.detach() - self._scale.detach() * \
#                 torch.sign(uniforms) * torch.log(1 - 2 * torch.abs(uniforms))
#         else:
#             return self._location - self._scale * torch.sign(uniforms) * \
#                 torch.log(1 - 2 * torch.abs(uniforms))
#     def sample_reparameterized(self, num_samples):
#
#         standard_laplace = MultivariateIndependentLaplace(
#             location=VariableCast(torch.zeros(self._location.size())),
#             scale=VariableCast(torch.ones(self._scale.size()))
#         )
#
#         return self._location + self._scale * standard_laplace.sample(num_samples)
#
#     def pdf(self, value, batch_size, num_particles):
#         assert(value.size() == self._location.size())
#         assert(list(self._location.size()[:2]) == [batch_size, num_particles])
#
#         return torch.prod(
#             (
#                 torch.exp(-torch.abs(value - self._location) / self._scale) /
#                 (2 * self._scale)
#             ).view(batch_size, num_particles, -1),
#             dim=2
#         ).squeeze(2)
#
#     def logpdf(self, value, batch_size, num_particles):
#         assert(value.size() == self._location.size())
#         assert(list(self._location.size()[:2]) == [batch_size, num_particles])
#
#         return torch.sum(
#             (
#                 -torch.abs(value - self._location) /
#                 self._scale - torch.log(2 * self._scale)
#             ).view(batch_size, num_particles, -1),
#             dim=2
#         ).squeeze(2)

# class MultivariateNormal(ContinuousRandomVariable):
#     """MultivariateIndependentNormal simple class"""
#     def __init__(self, mean, covariance):
#         """Initialize this distribution with mean, covariance.
#
#         input:
#             mean: Tensor/Variable
#                 [ dim_1, ..., dim_N]
#             covariance: Tensor/Variable
#                 covariance \in \mathbb{R}^{N \times N}
#         """
#         assert(mean.size()[0] == covariance.size()[0])
#         assert (mean.size()[0] == covariance.size()[1])
#         self._mean = mean
#         self._covariance = covariance
#         # cholesky decomposition returns upper triangular matrix. Will not accept Variables
#         self._L = torch.potrf(self._covariance.data)
#     def sample(self):
#         # Returns a sample of a multivariate normal X ~ N(mean, cov)
#         # A column vector of X ~ N(0, I)
#         uniform_normals = torch.Tensor(self._mean.size()).normal_().t()
#
#         if isinstance(self._mean, Variable):
#             return self._mean.detach() + \
#                 Variable(self._L.t().mm(uniform_normals))
#         else:
#             return self._L.t().mm(uniform_normals) + self._mean
#
#     def pdf(self, value):
#         assert(value.size() == self._mean.size())
#         # CAUTION: If the covariance is 'Unknown' then we will
#         # not be returned the correct derivatives.
#         print('****** Warning ******')
#         print(' IF COVARIANCE IS UNKNOWN AND THE DERIVATIVES ARE NEEDED W.R.T IT, THIS RETURNED FUNCTION \n \
#         WILL NOT RECORD THE GRAPH STRUCTURE OF THE COVARIANCE' )
#         value = VariableCast(value)
#         # the sqrt root of a det(cov) : sqrt(det(cov)) == det(L.t()) = \Pi_{i=0}^{N} L_{ii}
#         self._constant = 1 / (torch.pow(2 * np.pi, value.size()[1] / 2) * self._L.t().diag().prod())
#         return self._constant * torch.exp(-0.5*(value - self._mean).mm(self._L.inverse().mm(self._L.inverse().t())).mm((value - self._mean).t()))
#         #     torch.prod(
#         #     (
#         #         1 / torch.sqrt(2 * self._variance * np.pi) * torch.exp(
#         #             -0.5 * (value - self._mean)**2 / self._variance
#         #         )
#         #     ).view(-1),
#         #     dim=0
#         # ).squeeze(0)
#     # squeeze doesn't do anything here, for our use.
#     # view(-1), infers to change the structure of the
#     # calculation, so it is transformed to a column vector
#     # dim = 0, implies that we take the products all down the
#     # rows
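
# A small sketch of the view(-1)/prod pattern the comments above describe
# (illustrative values): flattening to a vector and reducing along dim 0
# multiplies per-dimension densities into one joint density.
#
#     per_dim = torch.Tensor([[0.5, 0.2], [0.1, 0.4]])
#     joint = torch.prod(per_dim.view(-1), dim=0)   # 0.5 * 0.2 * 0.1 * 0.4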