Example #1
class FisherSnedecor(Distribution):
    r"""
    Creates a Fisher-Snedecor distribution parameterized by `df1` and `df2`.

    Example::

        >>> m = FisherSnedecor(torch.Tensor([1.0]), torch.Tensor([2.0]))
        >>> m.sample()  # Fisher-Snedecor-distributed with df1=1 and df2=2
         0.2453
        [torch.FloatTensor of size 1]

    Args:
        df1 (float or Tensor or Variable): degrees of freedom parameter 1
        df2 (float or Tensor or Variable): degrees of freedom parameter 2
    """
    params = {'df1': constraints.positive, 'df2': constraints.positive}
    support = constraints.positive
    has_rsample = True

    def __init__(self, df1, df2):
        self.df1, self.df2 = broadcast_all(df1, df2)
        self._gamma1 = Gamma(self.df1 * 0.5, self.df1)
        self._gamma2 = Gamma(self.df2 * 0.5, self.df2)

        if isinstance(df1, Number) and isinstance(df2, Number):
            batch_shape = torch.Size()
        else:
            batch_shape = self.df1.size()
        super(FisherSnedecor, self).__init__(batch_shape)

    def rsample(self, sample_shape=torch.Size(())):
        shape = self._extended_shape(sample_shape)
        #   X1 ~ Gamma(df1 / 2, 1 / df1), X2 ~ Gamma(df2 / 2, 1 / df2)
        #   Y = df2 * df1 * X1 / (df1 * df2 * X2) = X1 / X2 ~ F(df1, df2)
        X1 = self._gamma1.rsample(sample_shape).view(shape)
        X2 = self._gamma2.rsample(sample_shape).view(shape)
        X2.clamp_(min=_finfo(X2).tiny)
        Y = X1 / X2
        Y.clamp_(min=_finfo(X2).tiny)
        return Y

    def log_prob(self, value):
        self._validate_log_prob_arg(value)
        ct1 = self.df1 * 0.5
        ct2 = self.df2 * 0.5
        ct3 = self.df1 / self.df2
        t1 = (ct1 + ct2).lgamma() - ct1.lgamma() - ct2.lgamma()
        t2 = ct1 * ct3.log() + (ct1 - 1) * torch.log(value)
        t3 = (ct1 + ct2) * torch.log1p(ct3 * value)
        return t1 + t2 - t3
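
A minimal usage sketch for the class above (the df values are illustrative; a FisherSnedecor class with this interface also ships in torch.distributions, so the stock class accepts the same calls):

import torch
from torch.distributions import FisherSnedecor

m = FisherSnedecor(torch.tensor([5.0]), torch.tensor([10.0]))
samples = m.rsample((10000,))   # differentiable samples via the Gamma-ratio construction
logp = m.log_prob(samples)      # density evaluated with the lgamma-based formula
print(samples.mean())           # should approach df2 / (df2 - 2) = 1.25 for df2 = 10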
Example #2
def compute_stochastic_elbo(a, b, nu, omega, x, y, a_0, b_0, mu_0):
    """
    Return a Monte Carlo estimate of the ELBO, using a single sample from Q(sigma^-2, beta).

    a, b are the Gamma 'shape' and 'rate' parameters of the variational posterior over the *precision*: q(tau) = q(sigma^-2)
    nu_k, omega_k are the Normal 'mean' and 'precision' parameters of the variational posterior over the weights: q(beta_k)
    x is an n by k matrix, where each row contains the regression inputs [1, x, x^2, x^3]
    y is an n by 1 matrix of target values
    a_0, b_0 are the parameters of the Gamma prior over the precision P(tau) = P(sigma^-2)
    mu_0 is the mean of the Normal prior over the weights beta
    """
    
    # Define mean field variational distribution over (beta, tau).
    Q_beta = Normal(nu, omega**-0.5)
    Q_tau = Gamma(a, b) 
    
    # Sample from variational distribution: (tau, beta) ~ Q
    # Use rsample to make sure that the result is differentiable.
    tau = Q_tau.rsample()
    sigma = tau**-0.5
    beta = Q_beta.rsample()
    
    # Create a single sample monte-carlo estimate of ELBO.
    P_tau = Gamma(a_0, b_0) 
    P_beta = Normal(mu_0, sigma) 
    P_y = Normal((beta[None, :]*x).sum(dim=1, keepdim=True), sigma) 
    
    kl_tau = Q_tau.log_prob(tau) - P_tau.log_prob(tau)
    kl_beta = Q_beta.log_prob(beta).sum() - P_beta.log_prob(beta).sum()
    log_likelihood = P_y.log_prob(y).sum()

    elbo = log_likelihood - kl_tau - kl_beta
    return elbo
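
A hypothetical fitting loop for the estimator above; the data shapes, initial values, and log-parameterization are assumptions added for illustration, not part of the original snippet:

import torch

n, k = 100, 4
x = torch.randn(n, k)   # stand-in for rows of [1, x, x^2, x^3]
y = torch.randn(n, 1)

log_a = torch.zeros((), requires_grad=True)   # log-parameterization keeps a, b, omega positive
log_b = torch.zeros((), requires_grad=True)
nu = torch.zeros(k, requires_grad=True)
log_omega = torch.zeros(k, requires_grad=True)
opt = torch.optim.Adam([log_a, log_b, nu, log_omega], lr=1e-2)

for step in range(1000):
    opt.zero_grad()
    elbo = compute_stochastic_elbo(log_a.exp(), log_b.exp(), nu, log_omega.exp(),
                                   x, y, a_0=torch.tensor(1.0),
                                   b_0=torch.tensor(1.0), mu_0=torch.zeros(k))
    (-elbo).backward()   # maximize the ELBO by minimizing its negative
    opt.step()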
Example #3
    def train(self, x, sampling=True, independent=True):
        '''
        Parameters
        ----------
        x : a batch of data
        sampling : whether to sample from the variational posterior
            distributions (if True, the default), or to just use the
            means of the variational distributions

        Returns
        -------
        KL : sum of the KL divergences between the variational
            distributions and their priors
        log_prob : log-likelihood of the batch under the (sampled or
            mean) variational parameters
        '''

        # The variational distributions
        mu = Normal(self.locs, self.scales)
        sigma = Gamma(self.alpha, self.beta)
        theta = Dirichlet(self.couts)

        # Sample from the variational distributions
        if sampling:
            Nb = 1  # one sample; an alternative would be Nb = x.shape[0]
            mu_sample = mu.rsample((Nb, ))
            sigma_sample = torch.pow(sigma.rsample((Nb, )), -0.5)
            theta_sample = theta.rsample((Nb, ))
        else:
            mu_sample = torch.reshape(mu.mean, (1, self.Nc, self.Nd))
            sigma_sample = torch.pow(
                torch.reshape(sigma.mean, (1, self.Nc, self.Nd)), -0.5)
            theta_sample = torch.reshape(theta.mean, (1, self.Nc))  # 1*Nc

        # The mixture density
        log_var = (sigma_sample**2).log()
        log_likelihoods = GMM.get_likelihoods(x,
                                              mu_sample.reshape(
                                                  (self.Nc, self.Nd)),
                                              log_var.reshape(
                                                  (self.Nc, self.Nd)),
                                              log=True)  # Nc*Nb

        log_prob = theta_sample @ log_likelihoods

        # Compute the KL divergence sum
        mu_div = kl_divergence(mu, self.mu_prior)
        sigma_div = kl_divergence(sigma, self.sigma_prior)
        theta_div = kl_divergence(theta, self.theta_prior)
        KL = mu_div + sigma_div + theta_div
        if 0:
            print("mu_div: %f \t sigma_div: %f \t theta_div: %f" %
                  (mu_div.sum().detach().numpy(),
                   sigma_div.sum().detach().numpy(),
                   theta_div.sum().detach().numpy()))
        return KL, log_prob
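
The precision-to-scale trick and the analytic KL penalty used above can be isolated in a few self-contained lines (the shapes and prior values here are illustrative):

import torch
from torch.distributions import Gamma, kl_divergence

alpha = torch.ones(3, 2, requires_grad=True)   # variational concentration
beta = torch.ones(3, 2, requires_grad=True)    # variational rate
q_tau = Gamma(alpha, beta)                     # posterior over precisions
p_tau = Gamma(torch.full((3, 2), 2.0), torch.full((3, 2), 2.0))   # prior

tau = q_tau.rsample((1,))    # precision sample, shape (1, 3, 2)
sigma = tau.pow(-0.5)        # standard deviation fed to the mixture components
kl = kl_divergence(q_tau, p_tau).sum()   # closed-form Gamma-Gamma KL, no sampling needed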
Example #4
def generateRandomScenes(shotNb, mean, var=0.5):

    # Moment matching: a Gamma with shape mean/scale and scale var/mean has the
    # requested mean and variance.
    scale = var / mean
    shape = mean / scale

    gam = Gamma(shape, 1 / scale)  # torch.distributions.Gamma takes (concentration, rate)

    # Generate scene start indices by first drawing scene lengths, then accumulating them.
    starts = torch.cat(
        (torch.tensor([0]),
         torch.cumsum(gam.rsample((shotNb, )).int(), dim=0)),
        dim=0)
    # Probably too many scenes have been generated; remove those past the end of the movie.
    starts = starts[starts < shotNb]

    return starts
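
For instance (the numbers are illustrative), scene starts for a 200-shot movie whose scene lengths average 10 shots with the default variance:

starts = generateRandomScenes(200, mean=10.0)
# -> a monotonically increasing tensor of start indices, all < 200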
Example #5
class FisherSnedecor(Distribution):
    r"""
    Creates a Fisher-Snedecor distribution parameterized by :attr:`df1` and :attr:`df2`.

    Example::

        >>> m = FisherSnedecor(torch.tensor([1.0]), torch.tensor([2.0]))
        >>> m.sample()  # Fisher-Snedecor-distributed with df1=1 and df2=2
        tensor([ 0.2453])

    Args:
        df1 (float or Tensor): degrees of freedom parameter 1
        df2 (float or Tensor): degrees of freedom parameter 2
    """
    arg_constraints = {
        'df1': constraints.positive,
        'df2': constraints.positive
    }
    support = constraints.positive
    has_rsample = True

    def __init__(self, df1, df2, validate_args=None):
        self.df1, self.df2 = broadcast_all(df1, df2)
        self._gamma1 = Gamma(self.df1 * 0.5, self.df1)
        self._gamma2 = Gamma(self.df2 * 0.5, self.df2)

        if isinstance(df1, Number) and isinstance(df2, Number):
            batch_shape = torch.Size()
        else:
            batch_shape = self.df1.size()
        super(FisherSnedecor, self).__init__(batch_shape,
                                             validate_args=validate_args)

    @property
    def mean(self):
        df2 = self.df2.clone()
        df2[df2 <= 2] = nan
        return df2 / (df2 - 2)

    @property
    def variance(self):
        df2 = self.df2.clone()
        df2[df2 <= 4] = nan
        return 2 * df2.pow(2) * (self.df1 + df2 - 2) / (self.df1 *
                                                        (df2 - 2).pow(2) *
                                                        (df2 - 4))

    def rsample(self, sample_shape=torch.Size(())):
        shape = self._extended_shape(sample_shape)
        #   X1 ~ Gamma(df1 / 2, 1 / df1), X2 ~ Gamma(df2 / 2, 1 / df2)
        #   Y = df2 * df1 * X1 / (df1 * df2 * X2) = X1 / X2 ~ F(df1, df2)
        X1 = self._gamma1.rsample(sample_shape).view(shape)
        X2 = self._gamma2.rsample(sample_shape).view(shape)
        X2.clamp_(min=_finfo(X2).tiny)
        Y = X1 / X2
        Y.clamp_(min=_finfo(X2).tiny)
        return Y

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        ct1 = self.df1 * 0.5
        ct2 = self.df2 * 0.5
        ct3 = self.df1 / self.df2
        t1 = (ct1 + ct2).lgamma() - ct1.lgamma() - ct2.lgamma()
        t2 = ct1 * ct3.log() + (ct1 - 1) * torch.log(value)
        t3 = (ct1 + ct2) * torch.log1p(ct3 * value)
        return t1 + t2 - t3
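
A small check of the closed-form moments above (the df values are illustrative):

import torch
from torch.distributions import FisherSnedecor

m = FisherSnedecor(torch.tensor([4.0]), torch.tensor([8.0]))
print(m.mean)       # df2 / (df2 - 2) = 8 / 6
print(m.variance)   # finite because df2 > 4
print(FisherSnedecor(torch.tensor([1.0]), torch.tensor([2.0])).mean)   # nan: undefined for df2 <= 2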
Example #6
class FisherSnedecor(Distribution):
    r"""
    Creates a Fisher-Snedecor distribution parameterized by `df1` and `df2`.

    Example::

        >>> m = FisherSnedecor(torch.tensor([1.0]), torch.tensor([2.0]))
        >>> m.sample()  # Fisher-Snedecor-distributed with df1=1 and df2=2
        tensor([ 0.2453])

    Args:
        df1 (float or Tensor): degrees of freedom parameter 1
        df2 (float or Tensor): degrees of freedom parameter 2
    """
    arg_constraints = {'df1': constraints.positive, 'df2': constraints.positive}
    support = constraints.positive
    has_rsample = True

    def __init__(self, df1, df2, validate_args=None):
        self.df1, self.df2 = broadcast_all(df1, df2)
        self._gamma1 = Gamma(self.df1 * 0.5, self.df1)
        self._gamma2 = Gamma(self.df2 * 0.5, self.df2)

        if isinstance(df1, Number) and isinstance(df2, Number):
            batch_shape = torch.Size()
        else:
            batch_shape = self.df1.size()
        super(FisherSnedecor, self).__init__(batch_shape, validate_args=validate_args)

    @property
    def mean(self):
        df2 = self.df2.clone()
        df2[df2 <= 2] = float('nan')
        return df2 / (df2 - 2)

    @property
    def variance(self):
        df2 = self.df2.clone()
        df2[df2 <= 4] = float('nan')
        return 2 * df2.pow(2) * (self.df1 + df2 - 2) / (self.df1 * (df2 - 2).pow(2) * (df2 - 4))

    def rsample(self, sample_shape=torch.Size(())):
        shape = self._extended_shape(sample_shape)
        #   X1 ~ Gamma(df1 / 2, 1 / df1), X2 ~ Gamma(df2 / 2, 1 / df2)
        #   Y = df2 * df1 * X1 / (df1 * df2 * X2) = X1 / X2 ~ F(df1, df2)
        X1 = self._gamma1.rsample(sample_shape).view(shape)
        X2 = self._gamma2.rsample(sample_shape).view(shape)
        X2.clamp_(min=_finfo(X2).tiny)
        Y = X1 / X2
        Y.clamp_(min=_finfo(X2).tiny)
        return Y

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        ct1 = self.df1 * 0.5
        ct2 = self.df2 * 0.5
        ct3 = self.df1 / self.df2
        t1 = (ct1 + ct2).lgamma() - ct1.lgamma() - ct2.lgamma()
        t2 = ct1 * ct3.log() + (ct1 - 1) * torch.log(value)
        t3 = (ct1 + ct2) * torch.log1p(ct3 * value)
        return t1 + t2 - t3
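
One way to sanity-check log_prob is against SciPy's F density (assuming scipy is available; the df values are illustrative):

import torch
from scipy.stats import f
from torch.distributions import FisherSnedecor

m = FisherSnedecor(torch.tensor([3.0]), torch.tensor([7.0]))
v = torch.tensor([0.5, 1.0, 2.0])
print(m.log_prob(v))                   # lgamma-based density from the class above
print(f.logpdf(v.numpy(), 3.0, 7.0))   # should agree to float precision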