Example #1
def test_normal_errors():
    model = Normal()
    assert model._diag_type == 'diag'
    # force an unsupported parameterization so the calls below raise NotImplementedError
    model._diag_type = 'FAKE'
    try:
        model.log_prob(None)
    except NotImplementedError:
        pass
    try:
        model.sample(4)
    except NotImplementedError:
        pass
    try:
        model.entropy()
    except NotImplementedError:
        pass
    try:
        model.scale
    except NotImplementedError:
        pass

    # a full (non-diagonal) covariance selects the Cholesky parameterization
    model = Normal([0., 0.], [3., 1.0, 1., 3.])
    assert model._diag_type == 'cholesky'
    try:
        model.cdf(5.)
    except NotImplementedError:
        pass
    try:
        model.icdf(5.)
    except NotImplementedError:
        pass
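The flat list [3., 1.0, 1., 3.] passed above appears to be a row-major 2x2 covariance, i.e. [[3., 1.], [1., 3.]]. A minimal cross-check with PyTorch's own torch.distributions (not the library under test), under that assumption:

import torch
from torch.distributions import MultivariateNormal

# reference density for the same mean/covariance as in the test above
mean = torch.zeros(2)
cov = torch.tensor([[3., 1.],
                    [1., 3.]])
ref = MultivariateNormal(mean, covariance_matrix=cov)

x = ref.sample((4,))
print(ref.log_prob(x))   # per-sample log-density, shape (4,)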
Example #2
def sample(self, X, compute_logprob=False):
    # draw z from a Normal centered at X W^T with isotropic noise;
    # optionally also return its log-density
    dist = Normal(F.linear(X, self.W),
                  self.noise * torch.eye(self.K),
                  learnable=False)
    z = dist.sample(1).squeeze(0)
    if compute_logprob:
        return z, dist.log_prob(z)
    return z
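The method above appears to draw z from a Normal centered at X W^T with isotropic noise and, optionally, return its log-density. A shape sketch of the same step written against torch.distributions, with assumed shapes X: (N, D) and W: (K, D):

import torch
import torch.nn.functional as F
from torch.distributions import MultivariateNormal

N, D, K, noise = 8, 5, 2, 1.0
X = torch.randn(N, D)
W = torch.randn(K, D)

# mean is (N, K); the shared covariance is noise * I_K
dist = MultivariateNormal(F.linear(X, W), covariance_matrix=noise * torch.eye(K))
z = dist.sample()          # (N, K)
log_q = dist.log_prob(z)   # (N,)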
Example #3
class ProbabilisticPCA(Distribution):

    has_latents = True

    def __init__(self, D, K=2, noise=1., tau=None):
        super().__init__()
        self.D = D
        self.K = K
        self.W = Parameter(torch.Tensor(D, K).float())
        self.noise = torch.tensor(noise)
        self.latent = Normal(torch.zeros(K), torch.ones(K), learnable=False)
        self.tau = tau
        self.prior = None
        if tau:
            self.prior = Normal(torch.zeros(K),
                                torch.full((K, ), tau),
                                learnable=False)

        self.reset_parameters()

    def reset_parameters(self):
        init.kaiming_uniform_(self.W, a=math.sqrt(5))

    def prior_probability(self, z):
        if self.prior is None:
            return 0.
        return self.prior.log_prob(z)

    def log_prob(self, X, z):
        # log p(X | z) under a Normal centered at z W^T with isotropic noise,
        # plus log p(z) when a prior over the latents is set
        dist = Normal(F.linear(z, self.W),
                      torch.full((z.size(0), self.D), self.noise),
                      learnable=False)
        return dist.log_prob(X) + self.prior_probability(z)

    def sample(self, z=None, batch_size=1):
        # draw latents from the prior when none are given, then sample observations given z
        if z is None:
            if self.prior is None:
                raise ValueError(
                    'PPCA has no prior distribution to sample latents from, please set tau in init'
                )
            z = self.prior.sample(batch_size)
        dist = Normal(F.linear(z, self.W),
                      torch.full((z.size(0), self.D), self.noise),
                      learnable=False)
        return dist.sample(1).squeeze(0)

    def fit(self, X, variational_dist=None, elbo_kwargs={}, **kwargs):
        if variational_dist is None:
            variational_dist = PPCA_Variational_V2(self)

        data = Data(X)
        stats = train(data, self, ELBO(variational_dist, **elbo_kwargs),
                      **kwargs)
        return stats

    def transform(self, X):
        # project observations onto the latent space: (N, D) @ (D, K) -> (N, K)
        return X.mm(self.W)
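A hypothetical usage sketch of the class above; only the constructor arguments and methods shown in the snippet are used, and any extra keyword arguments accepted by fit/train are left out:

import torch

X = torch.randn(200, 5)                    # 200 observations in 5 dimensions
ppca = ProbabilisticPCA(D=5, K=2, noise=1., tau=1.)

stats = ppca.fit(X)                        # optimizes W with the ELBO
Z = ppca.transform(X)                      # (200, 2) projection onto the latents
x_new = ppca.sample(batch_size=10)         # new observations via the latent prior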
Example #4
def test_normal_affine():
    model = Normal(1.0, 4.0)
    transform = TransformDistribution(Normal(0.0, 1.0),
                                      [Affine(1.0, 2.0)])

    x = model.sample(4)
    assert torch.all((transform.log_prob(x) - model.log_prob(x)).abs() < 1e-5)

    x = transform.sample(4)
    assert torch.all((transform.log_prob(x) - model.log_prob(x)).abs() < 1e-5)

    transform.get_parameters()
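The same identity holds in PyTorch's own torch.distributions and can serve as an independent sanity check. Note that torch.distributions.Normal is parameterized by the standard deviation, while the test above appears to pass a variance (4.0 == 2.0 ** 2):

import torch
from torch.distributions import Normal as TorchNormal
from torch.distributions import TransformedDistribution
from torch.distributions.transforms import AffineTransform

base = TorchNormal(0.0, 1.0)
shifted = TransformedDistribution(base, [AffineTransform(loc=1.0, scale=2.0)])
direct = TorchNormal(1.0, 2.0)   # scale 2 -> variance 4

x = direct.sample((4,))
assert torch.allclose(shifted.log_prob(x), direct.log_prob(x), atol=1e-6)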
Example #5
class VAE(Distribution):

    has_latents = True

    def __init__(self,
                 encoder_args={},
                 decoder_args={},
                 prior=None,
                 elbo_kwargs={}):
        super().__init__()
        preset_encoder_args = {
            'input_dim': 1,
            'hidden_sizes': [24, 24],
            'activation': 'ReLU',
            'output_shapes': [1, 1],
            'output_activations': [None, 'Softplus'],
            'distribution': partial(Normal, learnable=False)
        }
        preset_decoder_args = {
            'input_dim': 1,
            'hidden_sizes': [24, 24],
            'activation': 'ReLU',
            'output_shapes': [1],
            'output_activations': [Sigmoid()],
            'distribution': partial(Bernoulli, learnable=False)
        }

        preset_encoder_args.update(encoder_args)
        preset_decoder_args.update(decoder_args)

        self.encoder = ConditionalModel(**preset_encoder_args)
        self.decoder = ConditionalModel(**preset_decoder_args)
        self.criterion = ELBO(self.encoder, **elbo_kwargs)

        self.prior = prior
        if prior is None:
            latent_dim = preset_decoder_args['input_dim']
            self.prior = Normal(torch.zeros(latent_dim),
                                torch.ones(latent_dim),
                                learnable=False)

    def log_prob(self, X, Z=None):
        # latent given: exact joint log p(X, Z)
        if Z is not None:
            return self.decoder.log_prob(X, Z) + self.prior.log_prob(Z)

        # otherwise a single-sample estimate log p(X|Z) + log p(Z) - log q(Z|X),
        # with Z drawn from the encoder
        Z, encoder_probs = self.encoder.sample(X, compute_logprob=True)
        prior_probs = self.prior.log_prob(Z)
        decoder_log_probs = self.decoder.log_prob(X, Z)
        return decoder_log_probs + prior_probs - encoder_probs

    def sample(self, batch_size, compute_logprob=False):
        Z = self.prior.sample(batch_size)
        return self.decoder.sample(Z, compute_logprob)

    def fit(self, x, use_elbo=True, **kwargs):
        data = Data(x)
        if use_elbo:
            return train(data, self, self.criterion, **kwargs)
        return train(data, self, cross_entropy, **kwargs)

    def parameters(self):
        # expose only non-encoder parameters; the encoder is wrapped by the ELBO
        # criterion built in __init__
        for name, param in self.named_parameters(recurse=True):
            if 'encoder' in name:
                continue
            yield param
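A hypothetical end-to-end usage of the VAE above on one-dimensional binary data; the default presets expect input_dim == 1 with a Bernoulli decoder, and the toy dataset below is made up for illustration:

import torch

x = torch.bernoulli(torch.full((512, 1), 0.3))   # toy binary dataset, shape (512, 1)
vae = VAE()

stats = vae.fit(x)                     # trains with the ELBO criterion from __init__
samples = vae.sample(batch_size=16)    # decode draws from the latent prior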