Example no. 1
    def forward(self, x):
        """Return sample of latent variable and log prob."""
        x = x.type(torch.FloatTensor)
        loc_arg, scale_arg = torch.chunk(self.inference_network(x),
                                         chunks=2,
                                         dim=-1)
        loc = self.softplus(loc_arg)
        scale = self.softplus(scale_arg)

        all_gama = Gamma(loc, scale)
        scores = kl0._kl_gamma_gamma(all_gama, self.target_function)
        scores = scores.mean(dim=-1)
        zrnd = all_gama.sample(sample_shape=(self.n_samples, ))
        # fit Gamma parameters to the samples by moment matching (beta = mean/var, alpha = mean*beta)

        mz = zrnd.mean(dim=0)
        ms = zrnd.var(dim=0)
        beta = mz / ms
        alpha = mz * beta
        gama_z = Gamma(alpha, beta)
        z_score = kl0._kl_gamma_gamma(gama_z, self.target_function)
        z_score = z_score.mean(dim=-1)
        scores = torch.unsqueeze(scores, dim=-1)
        z_score = torch.unsqueeze(z_score, dim=-1)

        return zrnd, scores, z_score
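
The moment-matching step above (beta = mz / ms, alpha = mz * beta) is the method-of-moments fit of a Gamma to the drawn samples, since Gamma(alpha, beta) has mean alpha/beta and variance alpha/beta**2. A minimal self-contained sketch (the target parameters are made up) checking that the recovery works:

import torch
from torch.distributions import Gamma

# Hypothetical target whose parameters we pretend not to know.
target = Gamma(torch.tensor(3.0), torch.tensor(2.0))
z = target.sample((100000,))

# Method of moments: beta = mean / var, alpha = mean * beta.
mz, ms = z.mean(), z.var()
beta = mz / ms
alpha = mz * beta
print(alpha.item(), beta.item())  # close to 3.0 and 2.0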
Example no. 2
    def log_likelihood(self, x_norm, y_norm):
        mean, var, shape, rate, mixture_var = self(x_norm)
        norm_dist = Normal(mean, torch.sqrt(var))
        gamma_dist = Gamma(shape, rate)
        y = y_norm * self.y_std + self.y_mean + 10**(-4)

        only_normal_bool = (torch.abs(1 - mixture_var) < 10**(-4)).type(
            torch.float)
        only_gamma_bool = (mixture_var < 10**(-4)).type(torch.float)

        normal_component = norm_dist.log_prob(y_norm) + torch.log(mixture_var)
        gamma_component = gamma_dist.log_prob(y) + torch.log(1 - mixture_var)

        combined_tensor = torch.stack((normal_component, gamma_component),
                                      dim=0)
        output = torch.logsumexp(combined_tensor, dim=0)

        if float(mixture_var.mean()) < 0.9:
            logging.debug("Mixture var: {}".format(float(mixture_var.mean())))
            logging.debug("NLLs: {:.3f}, {:.3f}".format(
                -float(norm_dist.log_prob(y_norm).mean()),
                -float(gamma_dist.log_prob(y).mean()),
            ))
            logging.debug("Combined NLL: {:.3f}".format(-float(output.mean())))

        return output.mean()
Example no. 3
def gibbs_sweep(generative, x, z, trace):
    """
    Gibbs updates
    """
    post_alpha, post_beta, post_mu, post_nu = posterior_eta(x,
                                                            z=z,
                                                            prior_alpha=generative.prior_alpha,
                                                            prior_beta=generative.prior_beta,
                                                            prior_mu=generative.prior_mu,
                                                            prior_nu=generative.prior_nu)

    E_tau = (post_alpha / post_beta).mean(0)
    E_mu = post_mu.mean(0)
    tau = Gamma(post_alpha, post_beta).sample()
    mu = Normal(post_mu, 1. / (post_nu * tau).sqrt()).sample()
    posterior_logits = posterior_z(x, tau, mu, generative.prior_pi)
    E_z = posterior_logits.exp().mean(0)
    z = cat(logits=posterior_logits).sample()
    ll = generative.log_prob(x, z=z, tau=tau, mu=mu, aggregate=True)
    log_prior_tau = Gamma(generative.prior_alpha, generative.prior_beta).log_prob(tau).sum(-1).sum(-1)
    log_prior_mu = Normal(generative.prior_mu, 1. / (generative.prior_nu * tau).sqrt()).log_prob(mu).sum(-1).sum(-1)
    log_prior_z = cat(probs=generative.prior_pi).log_prob(z).sum(-1)
    log_joint = ll + log_prior_tau + log_prior_mu + log_prior_z
    trace['density'].append(log_joint.unsqueeze(0)) # 1-by-B-length vector
    return tau, mu, z, trace
Example no. 4
def gamma_ll(target_vals, v):
    """
    Evaluate gamma-bernoulli mixture likelihood
    Parameters:
    ----------
    v: torch.Tensor(batch,86,channels)
        parameters from model [rho, alpha, beta]
    target_vals: torch.Tensor(batch,86)
        target vals to eval at
    """

    # Reshape
    target_vals = target_vals.reshape(-1)
    v = v.reshape(-1, 3)

    # Deal with cases where data is missing for a station
    v = v[~torch.isnan(target_vals), :]
    target_vals = target_vals[~torch.isnan(target_vals)]

    # Make r mask
    r, target_vals = make_r_mask(target_vals)

    gamma = Gamma(concentration=v[:, 1], rate=v[:, 2])
    logp = gamma.log_prob(target_vals)

    total = r * (torch.log(v[:, 0]) + logp) + (1 - r) * torch.log(1 - v[:, 0])

    return torch.mean(total)
Example no. 5
    def test_loss(self, test_data):
        """
        outputs the losses the test data
        """
        x, y = test_data[:]
        if not test_data.x_normalised:
            constant_x = self.x_std == 0
            x = (x - self.x_mean) / self.x_std
            x[:, constant_x] = 0
        self.train(False)
        shape, rate = self(x)

        y = y.squeeze()
        shape = shape.squeeze()
        rate = rate.squeeze()

        gamma_dist = Gamma(shape, rate)

        test_nll = -gamma_dist.log_prob(y + 10**(-8)).mean()
        test_rmse = (((y - gamma_dist.mean)**2).mean())**0.5
        calibration_arr = self.calibration_test(y.detach().numpy(),
                                                shape.detach().numpy(),
                                                rate.detach().numpy())

        return float(test_nll), float(test_rmse), calibration_arr
Example no. 6
def gamma_logpdf(inputs, loc, scale, reduction=None):
    """Gamma log-density.

    Args:
        inputs (tensor): Inputs.
        loc (tensor): Concentration (shape) parameter.
        scale (tensor): Rate parameter.
        reduction (str, optional): Reduction. Defaults to no reduction.
            Possible values are "sum", "mean", and "batched_mean".

    Returns:
        tensor: Log-density.
    """
    dist = Gamma(concentration=loc, rate=scale)
    logp = dist.log_prob(inputs)

    if not reduction:
        return logp
    elif reduction == 'sum':
        return torch.sum(logp)
    elif reduction == 'mean':
        return torch.mean(logp)
    elif reduction == 'batched_mean':
        return torch.mean(torch.sum(logp, 1))
    else:
        raise RuntimeError(f'Unknown reduction "{reduction}".')
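
A hedged usage sketch of gamma_logpdf above (the tensor values are invented; note that, despite the names, loc is passed as the concentration and scale as the rate):

import torch

inputs = torch.rand(4, 3) + 0.1   # strictly positive targets
conc = torch.full((4, 3), 2.0)    # "loc" argument = concentration
rate = torch.full((4, 3), 1.5)    # "scale" argument = rate

print(gamma_logpdf(inputs, conc, rate))                             # elementwise log-density
print(gamma_logpdf(inputs, conc, rate, reduction='mean'))           # scalar mean
print(gamma_logpdf(inputs, conc, rate, reduction='batched_mean'))   # mean of per-row sums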
Example no. 7
    def __init__(self, Nc, Nd):
        '''
        Nc : number of components
        Nd : number of dimension
        '''
        # Initialize
        super(GaussianMixtureModel, self).__init__()
        self.Nc = Nc
        self.Nd = Nd

        # Variational distribution variables for means: mu ~ Normal(locs, scales)
        self.locs = Variable(torch.normal(10 * torch.zeros((Nc, Nd)), 1),
                             requires_grad=True)
        self.scales = Variable(torch.pow(Gamma(5, 5).rsample((Nc, Nd)), -0.5),
                               requires_grad=True)  # ??

        # VDV for standard deviations: sigma ~ Gamma(alpha, beta)
        self.alpha = Variable(torch.rand(Nc, Nd) * 2 + 4,
                              requires_grad=True)  # 4 is a hyperparameter
        self.beta = Variable(torch.rand(Nc, Nd) * 2 + 4,
                             requires_grad=True)  # 4 is a hyperparameter

        # VDV for component weights: theta ~ Dir(C)
        self.couts = Variable(2 * torch.ones((Nc, )),
                              requires_grad=True)  # 2 is a hyperparameter

        # Prior distributions for the means
        self.mu_prior = Normal(torch.zeros((Nc, Nd)), torch.ones((Nc, Nd)))

        # Prior distributions for the standard deviations
        self.sigma_prior = Gamma(5 * torch.ones((Nc, Nd)), 5 * torch.ones(
            (Nc, Nd)))

        # Prior distributions for the components weights
        self.theta_prior = Dirichlet(5 * torch.ones((Nc, )))  # uniform 0.2 * 5
Example no. 8
    def train(self, x, sampling=True, independent=True):
        '''
        Parameters
        ----------
        x : a batch of data
        sampling : whether to sample from the variational posterior
            distributions (if True, the default), or just use the mean of
            the variational distributions

        Return
        ------
        log_likelihoods : log likelihood for each sample
        kl_sum : Sum of the KL divergences between the variational
            distributions and their priors
        '''

        # The variational distributions
        mu = Normal(self.locs, self.scales)
        sigma = Gamma(self.alpha, self.beta)
        theta = Dirichlet(self.couts)

        # Sample from the variational distributions
        if sampling:
            #            Nb = x.shape[0]
            Nb = 1
            mu_sample = mu.rsample((Nb, ))
            sigma_sample = torch.pow(sigma.rsample((Nb, )), -0.5)
            theta_sample = theta.rsample((Nb, ))
        else:
            mu_sample = torch.reshape(mu.mean, (1, self.Nc, self.Nd))
            sigma_sample = torch.pow(
                torch.reshape(sigma.mean, (1, self.Nc, self.Nd)), -0.5)
            theta_sample = torch.reshape(theta.mean, (1, self.Nc))  # 1*Nc

        # The mixture density
        log_var = (sigma_sample**2).log()
        log_likelihoods = GMM.get_likelihoods(x,
                                              mu_sample.reshape(
                                                  (self.Nc, self.Nd)),
                                              log_var.reshape(
                                                  (self.Nc, self.Nd)),
                                              log=True)  # Nc*Nb

        log_prob_ = theta_sample @ log_likelihoods
        log_prob = log_prob_

        # Compute the KL divergence sum
        mu_div = kl_divergence(mu, self.mu_prior)
        sigma_div = kl_divergence(sigma, self.sigma_prior)
        theta_div = kl_divergence(theta, self.theta_prior)
        KL = mu_div + sigma_div + theta_div
        if 0:
            print("mu_div: %f \t sigma_div: %f \t theta_div: %f" %
                  (mu_div.sum().detach().numpy(),
                   sigma_div.sum().detach().numpy(),
                   theta_div.sum().detach().numpy()))
        return KL, log_prob
Example no. 9
    def __init__(self, df1, df2, validate_args=None):
        self.df1, self.df2 = broadcast_all(df1, df2)
        self._gamma1 = Gamma(self.df1 * 0.5, self.df1)
        self._gamma2 = Gamma(self.df2 * 0.5, self.df2)

        if isinstance(df1, Number) and isinstance(df2, Number):
            batch_shape = torch.Size()
        else:
            batch_shape = self.df1.size()
        super(FisherSnedecor, self).__init__(batch_shape, validate_args=validate_args)
Example no. 10
    def __init__(self, df1, df2):
        self.df1, self.df2 = broadcast_all(df1, df2)
        self._gamma1 = Gamma(self.df1 * 0.5, self.df1)
        self._gamma2 = Gamma(self.df2 * 0.5, self.df2)

        if isinstance(df1, Number) and isinstance(df2, Number):
            batch_shape = torch.Size()
        else:
            batch_shape = self.df1.size()
        super(FisherSnedecor, self).__init__(batch_shape)
Example no. 11
 def __init__(self, X_train, y_train, batch_size, num_particles,
              hidden_dim):
     self.gamma_prior = Gamma(torch.tensor(1., device=device),
                              torch.tensor(1 / 0.1, device=device))
     self.lambda_prior = Gamma(torch.tensor(1., device=device),
                               torch.tensor(1 / 0.1, device=device))
     self.X_train = X_train
     self.y_train = y_train
     self.batch_size = batch_size
     self.num_particles = num_particles
     self.n_features = X_train.shape[1]
     self.hidden_dim = hidden_dim
Example no. 12
File: flows.py Project: jhss/CTNF
    def forward(self, inputs):
        s = torch.sum(inputs, dim=1)
        dirichlet = inputs / s.reshape(-1, 1)
        sum_preds = self.pi(dirichlet).squeeze(1)
        #print("sum_preds: ", sum_preds)
        #print("s : ", s)
        logpsz = Gamma(
            sum_preds,
            1).log_prob(s) - self.num_inputs * torch.log(sum_preds + 10e-9)

        #print("Dirichlet: ", dirichlet)
        #print("logpsz: ", logpsz)
        return dirichlet, logpsz.unsqueeze(1)
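
The forward pass above splits a positive vector into its sum s (scored under a Gamma) and its normalized remainder (the Dirichlet part). It relies on the standard fact that independent Gammas with a common rate, divided by their sum, are Dirichlet distributed; a small self-contained check with illustrative concentrations:

import torch
from torch.distributions import Gamma, Dirichlet

alpha = torch.tensor([2.0, 3.0, 5.0])
g = Gamma(alpha, torch.ones(3)).sample((200000,))  # independent Gamma(alpha_i, 1)
d = g / g.sum(dim=1, keepdim=True)                 # normalize each draw

print(d.mean(dim=0))          # empirical mean of the normalized samples
print(Dirichlet(alpha).mean)  # alpha / alpha.sum() = [0.2, 0.3, 0.5]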
Example no. 13
class FisherSnedecor(Distribution):
    r"""
    Creates a Fisher-Snedecor distribution parameterized by `df1` and `df2`.

    Example::

        >>> m = FisherSnedecor(torch.Tensor([1.0]), torch.Tensor([2.0]))
        >>> m.sample()  # Fisher-Snedecor-distributed with df1=1 and df2=2
         0.2453
        [torch.FloatTensor of size 1]

    Args:
        df1 (float or Tensor or Variable): degrees of freedom parameter 1
        df2 (float or Tensor or Variable): degrees of freedom parameter 2
    """
    params = {'df1': constraints.positive, 'df2': constraints.positive}
    support = constraints.positive
    has_rsample = True

    def __init__(self, df1, df2):
        self.df1, self.df2 = broadcast_all(df1, df2)
        self._gamma1 = Gamma(self.df1 * 0.5, self.df1)
        self._gamma2 = Gamma(self.df2 * 0.5, self.df2)

        if isinstance(df1, Number) and isinstance(df2, Number):
            batch_shape = torch.Size()
        else:
            batch_shape = self.df1.size()
        super(FisherSnedecor, self).__init__(batch_shape)

    def rsample(self, sample_shape=torch.Size(())):
        shape = self._extended_shape(sample_shape)
        #   X1 ~ Gamma(df1 / 2, 1 / df1), X2 ~ Gamma(df2 / 2, 1 / df2)
        #   Y = df2 * df1 * X1 / (df1 * df2 * X2) = X1 / X2 ~ F(df1, df2)
        X1 = self._gamma1.rsample(sample_shape).view(shape)
        X2 = self._gamma2.rsample(sample_shape).view(shape)
        X2.clamp_(min=_finfo(X2).tiny)
        Y = X1 / X2
        Y.clamp_(min=_finfo(X2).tiny)
        return Y

    def log_prob(self, value):
        self._validate_log_prob_arg(value)
        ct1 = self.df1 * 0.5
        ct2 = self.df2 * 0.5
        ct3 = self.df1 / self.df2
        t1 = (ct1 + ct2).lgamma() - ct1.lgamma() - ct2.lgamma()
        t2 = ct1 * ct3.log() + (ct1 - 1) * torch.log(value)
        t3 = (ct1 + ct2) * torch.log1p(ct3 * value)
        return t1 + t2 - t3
Example no. 14
    def __init__(self, model_gp, likelihood_gp, hyperpriors: dict) -> None:
        self.model_gp = model_gp
        self.likelihood_gp = likelihood_gp
        self.hyperpriors = hyperpriors

        a_beta = self.hyperpriors["lengthscales"].kwds["a"]
        b_beta = self.hyperpriors["lengthscales"].kwds["b"]

        self.Beta_tmp = Beta(concentration1=a_beta, concentration0=b_beta)

        a_gg = self.hyperpriors["outputscale"].kwds["a"]
        b_gg = self.hyperpriors["outputscale"].kwds["scale"]

        self.Gamma_tmp = Gamma(concentration=a_gg, rate=1. / b_gg)
Example no. 15
 def forward(self, ob, prior_ng, sampled=True, tau_old=None, mu_old=None):
     q = probtorch.Trace()
     (prior_alpha, prior_beta, prior_mu, prior_nu) = prior_ng
     q_alpha, q_beta, q_mu, q_nu = posterior_eta(self.ob(ob),
                                                 self.gamma(ob),
                                                 prior_alpha, prior_beta,
                                                 prior_mu, prior_nu)
     if sampled:  ## used in forward transition kernel where we need to sample
         tau = Gamma(q_alpha, q_beta).sample()
         q.gamma(q_alpha, q_beta, value=tau, name='precisions')
         mu = Normal(q_mu,
                     1. / (q_nu * q['precisions'].value).sqrt()).sample()
         q.normal(
             q_mu,
             1. /
             (q_nu *
              q['precisions'].value).sqrt(),  # std = 1 / sqrt(nu * tau)
             value=mu,
             name='means')
     else:  ## used in backward transition kernel where samples were given from last sweep
         q.gamma(q_alpha, q_beta, value=tau_old, name='precisions')
         q.normal(q_mu,
                  1. / (q_nu * q['precisions'].value).sqrt(),
                  value=mu_old,
                  name='means')
     return q
Example no. 16
    def forward(self,
                ob,
                z,
                prior_ng,
                sampled=True,
                tau_old=None,
                mu_old=None):
        q = probtorch.Trace()
        (prior_alpha, prior_beta, prior_mu, prior_nu) = prior_ng
        ob_z = torch.cat(
            (ob, z), -1)  # concatenate observations and cluster assignments
        q_alpha, q_beta, q_mu, q_nu = posterior_eta(self.ob(ob_z),
                                                    self.gamma(ob_z),
                                                    prior_alpha, prior_beta,
                                                    prior_mu, prior_nu)

        if sampled:
            tau = Gamma(q_alpha, q_beta).sample()
            q.gamma(q_alpha, q_beta, value=tau, name='precisions')
            mu = Normal(q_mu,
                        1. / (q_nu * q['precisions'].value).sqrt()).sample()
            q.normal(q_mu,
                     1. / (q_nu * q['precisions'].value).sqrt(),
                     value=mu,
                     name='means')
        else:
            q.gamma(q_alpha, q_beta, value=tau_old, name='precisions')
            q.normal(q_mu,
                     1. / (q_nu * q['precisions'].value).sqrt(),
                     value=mu_old,
                     name='means')
        return q
Example no. 17
    def fit(self, train_data):
        self.train(True)
        self.x_mean, self.x_std = train_data.normalise_x()
        data_generator = data.DataLoader(train_data, batch_size=self.batch_size)

        optimiser = torch.optim.Adam(self.parameters(), lr=self.lr)

        for _ in torch.arange(self.n_epochs):
            for i, sample in enumerate(data_generator):
                x, y = sample
                shape, rate = self(x)
                gamma_dist = Gamma(shape, rate)
                optimiser.zero_grad()
                loss = -gamma_dist.log_prob(y.squeeze() + 10 ** (-8)).mean()
                loss.backward()
                optimiser.step()
Example no. 18
def generateRandomScenes(shotNb, mean, var=0.5):

    scale = var / mean
    shape = mean / scale

    gam = Gamma(shape, 1 / scale)

    # Generate scene start indices by first generating scene lengths
    starts = torch.cat(
        (torch.tensor([0]), torch.cumsum(gam.rsample(
            (shotNb, )).int(), dim=0)),
        dim=0)
    # Probably too many scenes have been generated; remove those beyond the movie limit
    starts = starts[starts < shotNb]

    return starts
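
generateRandomScenes converts the requested mean and variance of scene length to shape/scale (shape = mean**2 / var, scale = var / mean) and passes rate = 1 / scale to Gamma. A quick sanity check with illustrative numbers:

import torch
from torch.distributions import Gamma

mean, var = torch.tensor(4.0), torch.tensor(0.5)
scale = var / mean
shape = mean / scale            # = mean**2 / var
gam = Gamma(shape, 1 / scale)   # rate = 1 / scale = mean / var

print(gam.mean, gam.variance)   # recovers 4.0 and 0.5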
Example no. 19
def sample_from_posterior(x, a, b, mu, lmbda):
    """
    Sample betas from the posterior distribution.
    """
    variance = 1./Gamma(a, b).sample()
    betas = MN(mu.squeeze(), variance*lmbda.inverse()).sample()
    return polynomial(betas.numpy(), x)
Example no. 20
    def loss(self,
             y_obs,
             y_pred,
             dropout_prob_logit,
             theta,
             batch_scale=1.0,
             beta=1.0):

        prior_loss = 0.0

        if self.translation_invariance:
            prior_loss += self.get_delta().pow(2).sum()

        if self.scale_invariance:
            lambdas = self.get_lambda()
            prior_loss -= Gamma(
                torch.ones_like(lambdas),
                torch.ones_like(lambdas)).log_prob(lambdas).sum()

        loglik = batch_scale * self.loglik(y_obs, y_pred, dropout_prob_logit,
                                           theta)

        decoder_loss = -loglik + beta * self.KL_phi() + prior_loss

        return decoder_loss
Example no. 21
def tau_prior(a_0, b_0):
    """
    Prior over the precision, i.e., tau = 1/sigma^2 
    
    To sample variance:
    variance = 1./tau_prior(a_0, b_0).sample()
    """
    return Gamma(a_0, b_0)
Example no. 22
def compute_stochastic_elbo(a, b, nu, omega, x, y, a_0, b_0, mu_0):
    """
    Return a monte-carlo estimate of the ELBO, using a single sample from Q(sigma^-2, beta)
    
    a, b are the Gamma 'shape' and 'rate' parameters for the variational posterior over *precision*: q(tau) = q(sigma^-2)
    nu_k, omega_k are Normal 'mean' and 'precision' parameters for the variational posterior over weights: q(beta_k)
    x is an n by k matrix, where each row contains the regression inputs [1, x, x^2, x^3]
    y is an n by 1 vector of targets
    a_0, b_0 the parameters for the Gamma prior over precision P(tau) = P(sigma^-2)
    mu_0 is the mean of the Normal prior on the weights beta
    """
    
    # Define mean field variational distribution over (beta, tau).
    Q_beta = Normal(nu, omega**-0.5)
    Q_tau = Gamma(a, b) 
    
    # Sample from variational distribution: (tau, beta) ~ Q
    # Use rsample to make sure that the result is differentiable.
    tau = Q_tau.rsample()
    sigma = tau**-0.5
    beta = Q_beta.rsample()
    
    # Create a single sample monte-carlo estimate of ELBO.
    P_tau = Gamma(a_0, b_0) 
    P_beta = Normal(mu_0, sigma) 
    P_y = Normal((beta[None, :]*x).sum(dim=1, keepdim=True), sigma) 
    
    kl_tau = Q_tau.log_prob(tau) - P_tau.log_prob(tau)
    kl_beta = Q_beta.log_prob(beta).sum() - P_beta.log_prob(beta).sum()
    log_likelihood = P_y.log_prob(y).sum()

    elbo = log_likelihood - kl_tau - kl_beta
    return elbo
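
A hedged usage sketch of compute_stochastic_elbo on a toy cubic-regression problem; the data and the initial variational parameters below are invented for illustration:

import torch

n = 50
x1 = torch.linspace(-1, 1, n)
x = torch.stack([torch.ones(n), x1, x1**2, x1**3], dim=1)  # n-by-4 design matrix
true_beta = torch.tensor([0.5, -1.0, 2.0, 0.3])
y = (x @ true_beta + 0.1 * torch.randn(n)).unsqueeze(1)    # n-by-1 targets

# Variational parameters (require gradients so the ELBO can be maximised).
a = torch.tensor(2.0, requires_grad=True)
b = torch.tensor(2.0, requires_grad=True)
nu = torch.zeros(4, requires_grad=True)
omega = torch.ones(4, requires_grad=True)

elbo = compute_stochastic_elbo(a, b, nu, omega, x, y, a_0=2.0, b_0=2.0, mu_0=0.0)
(-elbo).backward()  # a stochastic-gradient step on -ELBO would follow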
Example no. 23
 def sim_one_gmm(self):
     precision = Gamma(torch.ones((self.K, self.D)) * self.alpha, torch.ones((self.K, self.D)) * self.beta).sample()
     sigma_of_mean = 1. / (precision * self.nu).sqrt()
     sigma = 1. / torch.sqrt(precision)
     mean = Normal(torch.ones((self.K, self.D)) * self.mu, sigma_of_mean).sample()
     assignment = cat(torch.ones(self.K) * (1. / self.K)).sample((self.N,))
     labels = assignment.argmax(-1)
     ob = Normal(mean[labels], sigma[labels]).sample()
     return ob.data.numpy(), precision.data.numpy(), mean.data.numpy(), assignment.data.numpy()
Example no. 24
    def rmse(self, x_norm, y_norm):
        mean, var, shape, rate, mixture_var = self(x_norm)
        norm_dist = Normal(mean, torch.sqrt(var))
        gamma_dist = Gamma(shape, rate)
        y = y_norm * self.y_std + self.y_mean

        y_pred = (mixture_var * (norm_dist.mean * self.y_std + self.y_mean) +
                  (1 - mixture_var) * gamma_dist.mean)
        return torch.sqrt(((y - y_pred)**2).mean())
Example no. 25
    def init_params_random(self) -> None:
        """
        Sample and set parameter values from the normal-gamma prior.

        For more details on sampling from a normal-gamma distribution see:
            - https://people.eecs.berkeley.edu/~jordan/courses/260-spring10/lectures/lecture5.pdf # noqa: E501
            - https://www.cs.ubc.ca/~murphyk/Papers/bayesGauss.pdf
        """
        prec_m = Gamma(self.prec_alpha_prior,
                       self.prec_beta_prior)
        self.precs = prec_m.sample()

        means_m = MultivariateNormal(loc=self.means_prior,
                                     precision_matrix=(self.n0 *
                                                       self.prec_alpha_prior /
                                                       self.prec_beta_prior
                                                       ).diag())
        self.means = means_m.sample()
Example no. 26
 def log_joint(self, generative, x, z, tau, mu):
     ll = generative.log_prob(x, z=z, tau=tau, mu=mu, aggregate=True)
     log_prior_tau = Gamma(
         generative.prior_alpha,
         generative.prior_beta).log_prob(tau).sum(-1).sum(-1)
     log_prior_mu = Normal(
         generative.prior_mu, 1. /
         (generative.prior_nu * tau).sqrt()).log_prob(mu).sum(-1).sum(-1)
     log_prior_z = cat(probs=generative.prior_pi).log_prob(z).sum(-1)
     return (ll + log_prior_tau + log_prior_mu + log_prior_z)
Example no. 27
    def forward(self, x):
        """Return sample of latent variable and log prob."""
        x = x.type(torch.FloatTensor)
        scale_arg = self.inference_network(x)
        scale = self.softplus(scale_arg)

        all_gama = Gamma(scale, self.all_beta)
        all_dir = Dirichlet(scale)
        scores = kl0._kl_dirichlet_dirichlet(all_dir, self.target_function)
        scores = scores.mean(dim=-1)

        zrnd = all_gama.sample(sample_shape=(self.n_samples, ))
        # fit Dirichlet parameters from the sample mean

        mz = zrnd.mean(dim=0)
        z_dir = Dirichlet(mz)
        z_score = kl0._kl_dirichlet_dirichlet(z_dir, self.target_function)
        z_score = z_score.mean(dim=-1)
        scores = torch.unsqueeze(scores, dim=-1)
        z_score = torch.unsqueeze(z_score, dim=-1)

        return zrnd, scores, z_score
Example no. 28
def tbi_func(x, v):
    """
    Evaluate gamma-GP-Bernoulli mixture likelihood
    Parameters:
    ----------
    v: torch.Tensor(batch*86, channels)
        parameters from model
    x: torch.Tensor(batch*86)
        target vals to eval at
    """
    # Gamma distribution
    g = Gamma(concentration=v[:, 2], rate=v[:, 3])
    gamma = torch.exp(torch.clamp(g.log_prob(x), min=-1e5, max=1e5))

    # Weight term
    weight_term = (1 / 2) + (1 / np.pi) * torch.atan((x - v[:, 5]) / v[:, 6])

    # GP distribution
    gp = (1 / v[:, 4]) * (1 + (v[:, 1] * x / v[:, 4]))**((-1 / v[:, 1]) - 1)

    # total
    tbi = gamma * (1 - weight_term) + gp * weight_term
    return torch.clamp(tbi, min=1e-5)
Example no. 29
 def reparameterize(self, alpha, beta):
     """
     :alpha:  is the shape/concentration
     :beta:   is the rate (i.e. 1/scale)
     """
     # sample the \hat{z} ~ Gamma(shape + B, 1.) to guarantee acceptance
     new_alpha = alpha.clone()
     new_alpha = Variable(new_alpha + self.gamma_shape,
                          requires_grad=False)
     z_hat = Gamma(new_alpha, torch.ones(alpha.shape)).sample()
     # compute the epsilon corresponding to \hat{z}; this epsilon is 'accepted'
     # \epsilon = h_inverse(z_tilde; shape + B)
     eps = self.compute_h_inverse(z_hat, alpha + self.gamma_shape)
     # now compute z_tilde = h(epsilon, alpha + gamma_shape)
     z_tilde = self.compute_h(eps, alpha + self.gamma_shape)
     return z_tilde / beta
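
compute_h and compute_h_inverse are not shown in this snippet; in the standard reparameterisation of Gamma sampling via the Marsaglia-Tsang proposal (as in Naesseth et al.'s rejection-sampling variational inference), they would look roughly like the sketch below. This is an assumption about the missing helpers, not the repository's actual code:

import torch

def compute_h(eps, alpha):
    # Assumed Marsaglia-Tsang transform: z = (alpha - 1/3) * (1 + eps / sqrt(9*alpha - 3))**3
    return (alpha - 1. / 3.) * (1. + eps / torch.sqrt(9. * alpha - 3.)) ** 3

def compute_h_inverse(z, alpha):
    # Assumed inverse: eps = sqrt(9*alpha - 3) * ((z / (alpha - 1/3))**(1/3) - 1)
    return torch.sqrt(9. * alpha - 3.) * ((z / (alpha - 1. / 3.)) ** (1. / 3.) - 1.)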
Example no. 30
class PreprocessingIntNoisyFromAmp:
    def __init__(self, flag_bayes=False):
        from torch.distributions.gamma import Gamma
        self.gen_dist = Gamma(torch.tensor([1.0]), torch.tensor([1.0]))
        self.flag_bayes = flag_bayes

    def __call__(self, target):
        if self.flag_bayes:
            target = target + 1 / scale_img
        target = target ** 2
        noise = self.gen_dist.sample(target.shape)[:, :, :, :, 0]
        mask  = torch.ones(target.shape)
        if target.is_cuda:
            noise = noise.cuda()
            mask  = mask.cuda()
        noisy = target * noise
        return noisy, target, mask
Example no. 31
def create_Gamma_samples(cfg, sample_size, Gamma_target):
    overall_scres = []

    for i in range(sample_size):
        zrnd = Gamma_target.sample(sample_shape=(10 * sample_size, ))
        mz = zrnd.mean(dim=0)
        ms = zrnd.var(dim=0)
        beta = mz / ms
        alpha = mz * beta

        create_g = Gamma(alpha, beta)
        score = kl0._kl_gamma_gamma(create_g, Gamma_target)
        score = convert_metric_2_score(torch.unsqueeze(score, dim=-1))

        overall_scres.append(score)

        # overall_scres.append(score.detach().numpy()[0])
        # print (overall_scres)
    return overall_scres
Example no. 32
class FisherSnedecor(Distribution):
    r"""
    Creates a Fisher-Snedecor distribution parameterized by `df1` and `df2`.

    Example::

        >>> m = FisherSnedecor(torch.tensor([1.0]), torch.tensor([2.0]))
        >>> m.sample()  # Fisher-Snedecor-distributed with df1=1 and df2=2
         0.2453
        [torch.FloatTensor of size 1]

    Args:
        df1 (float or Tensor): degrees of freedom parameter 1
        df2 (float or Tensor): degrees of freedom parameter 2
    """
    arg_constraints = {'df1': constraints.positive, 'df2': constraints.positive}
    support = constraints.positive
    has_rsample = True

    def __init__(self, df1, df2, validate_args=None):
        self.df1, self.df2 = broadcast_all(df1, df2)
        self._gamma1 = Gamma(self.df1 * 0.5, self.df1)
        self._gamma2 = Gamma(self.df2 * 0.5, self.df2)

        if isinstance(df1, Number) and isinstance(df2, Number):
            batch_shape = torch.Size()
        else:
            batch_shape = self.df1.size()
        super(FisherSnedecor, self).__init__(batch_shape, validate_args=validate_args)

    @property
    def mean(self):
        df2 = self.df2.clone()
        df2[df2 <= 2] = float('nan')
        return df2 / (df2 - 2)

    @property
    def variance(self):
        df2 = self.df2.clone()
        df2[df2 <= 4] = float('nan')
        return 2 * df2.pow(2) * (self.df1 + df2 - 2) / (self.df1 * (df2 - 2).pow(2) * (df2 - 4))

    def rsample(self, sample_shape=torch.Size(())):
        shape = self._extended_shape(sample_shape)
        #   X1 ~ Gamma(df1 / 2, 1 / df1), X2 ~ Gamma(df2 / 2, 1 / df2)
        #   Y = df2 * df1 * X1 / (df1 * df2 * X2) = X1 / X2 ~ F(df1, df2)
        X1 = self._gamma1.rsample(sample_shape).view(shape)
        X2 = self._gamma2.rsample(sample_shape).view(shape)
        X2.clamp_(min=_finfo(X2).tiny)
        Y = X1 / X2
        Y.clamp_(min=_finfo(X2).tiny)
        return Y

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        ct1 = self.df1 * 0.5
        ct2 = self.df2 * 0.5
        ct3 = self.df1 / self.df2
        t1 = (ct1 + ct2).lgamma() - ct1.lgamma() - ct2.lgamma()
        t2 = ct1 * ct3.log() + (ct1 - 1) * torch.log(value)
        t3 = (ct1 + ct2) * torch.log1p(ct3 * value)
        return t1 + t2 - t3
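
For comparison, the same distribution ships in current PyTorch as torch.distributions.FisherSnedecor; a quick check that the sample mean matches the df2 / (df2 - 2) formula from the mean property above:

import torch
from torch.distributions import FisherSnedecor

m = FisherSnedecor(torch.tensor([4.0]), torch.tensor([10.0]))
samples = m.sample((200000,))
print(samples.mean())  # empirical mean
print(m.mean)          # df2 / (df2 - 2) = 10 / 8 = 1.25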