Example #1
    def test_sum_except_batch(self):
        x1 = torch.ones(10)
        x1_sum = sum_except_batch(x1)
        self.assertEqual(x1_sum, torch.ones(10))

        x2 = torch.ones(10, 5)
        x2_sum = sum_except_batch(x2)
        self.assertEqual(x2_sum, torch.ones(10) * 5)
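The examples below are all consistent with sum_except_batch reducing over every dimension except the leading batch dimension. A minimal sketch of such a helper (the actual signature in the source library may differ):

    import torch

    def sum_except_batch(x):
        # Assumed behavior: flatten all non-batch dimensions and sum them,
        # returning one value per batch element.
        return x.reshape(x.shape[0], -1).sum(-1)

With this sketch, sum_except_batch(torch.ones(10)) returns a tensor of ones with shape (10,), and sum_except_batch(torch.ones(10, 5)) returns a tensor of fives with shape (10,), matching the assertions in the test above.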
Example #2
 def log_prior(self, theta):
     theta = self.scale_theta(theta)
     if self.prior == 'uniform':
         return 0
     else:
         prior = Normal(loc=self.prior_loc, scale=self.prior_scale)
         if self.num_bits is None:
             return sum_except_batch(prior.log_prob(theta))
         else:
             return sum_except_batch(torch.log(prior.cdf(theta+1)-prior.cdf(theta)+1e-12))
Example #3
 def forward(self, x):
     # The "+ 1e-45" bit is for numerical stability. Otherwise the ldj will be -inf where any element of x is around
     # 6.0 or greater, since torch.tanh() returns 1.0 around that point. This way it maxes out around -103.2789.
     z = torch.tanh(x)
     ldj = torch.log(1 - z**2 + 1e-45)
     ldj = sum_except_batch(ldj)
     return z, ldj
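Example #3 computes the log-det-Jacobian of tanh as log(1 - tanh(x)^2). A quick standalone check against autograd (a sketch, not part of the source):

    import torch

    x = torch.randn(4, 3, requires_grad=True)
    z = torch.tanh(x)
    # Closed-form per-element log|dz/dx| as used in the example above
    ldj_analytic = torch.log(1 - z**2 + 1e-45)
    # Elementwise derivative of tanh via autograd
    dz_dx = torch.autograd.grad(z.sum(), x)[0]
    print(torch.allclose(ldj_analytic, dz_dx.log(), atol=1e-6))  # expected: True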
Example #4
 def _elementwise_forward(self, x, elementwise_params):
     assert elementwise_params.shape[-1] == self._output_dim_multiplier()
     z, ldj_elementwise = splines.linear_spline(x,
                                                elementwise_params,
                                                inverse=False)
     ldj = sum_except_batch(ldj_elementwise)
     return z, ldj
Example #5
 def _elementwise_forward(self, x, elementwise_params):
     unconstrained_scale, shift = self._unconstrained_scale_and_shift(
         elementwise_params)
     log_scale = 2. * torch.tanh(unconstrained_scale / 2.)
     z = shift + torch.exp(log_scale) * x
     ldj = sum_except_batch(log_scale)
     return z, ldj
Example #6
 def _elementwise_forward(self, x, elementwise_params):
     assert elementwise_params.shape[-1] == self._output_dim_multiplier()
     unconstrained_scale, shift = self._unconstrained_scale_and_shift(elementwise_params)
     scale = self.scale_fn(unconstrained_scale)
     z = scale * x + shift
     ldj = sum_except_batch(torch.log(scale))
     return z, ldj
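Examples #5 and #6 both apply an elementwise affine map z = scale * x + shift and accumulate log(scale) per batch element. A standalone sketch of the same pattern together with its inverse (function names here are illustrative, not from the source):

    import torch

    def sum_except_batch(x):
        # Assumed behavior: sum over all non-batch dimensions.
        return x.reshape(x.shape[0], -1).sum(-1)

    def affine_forward(x, log_scale, shift):
        z = torch.exp(log_scale) * x + shift
        ldj = sum_except_batch(log_scale)
        return z, ldj

    def affine_inverse(z, log_scale, shift):
        return (z - shift) * torch.exp(-log_scale)

    x = torch.randn(2, 4)
    log_scale, shift = 0.5 * torch.randn(2, 4), torch.randn(2, 4)
    z, ldj = affine_forward(x, log_scale, shift)
    print(torch.allclose(affine_inverse(z, log_scale, shift), x, atol=1e-5))  # expected: True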
Example #7
 def sample_with_log_prob(self, context, feedback=None):
     mean, std = self.cond_dist_params(context, feedback=feedback)
     dist = HalfNormal(std)
     z = dist.rsample() + mean
     log_prob = dist.log_prob(z)
     log_prob = sum_except_batch(log_prob)
     return z, log_prob
Example #8
 def log_prob(self, x):
     log_scaling = math.log(2)
     log_base = -0.5 * math.log(2 * math.pi)
     log_inner = -0.5 * x**2
     log_probs = log_scaling + log_base + log_inner
     log_probs[x < 0] = -math.inf
     return sum_except_batch(log_probs)
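Example #8 is the elementwise log-density of a standard half-normal. For nonnegative inputs it can be cross-checked against torch.distributions.HalfNormal (sketch only):

    import math
    import torch
    from torch.distributions import HalfNormal

    x = torch.rand(5)  # nonnegative inputs, inside the support
    manual = math.log(2) - 0.5 * math.log(2 * math.pi) - 0.5 * x**2
    reference = HalfNormal(1.0).log_prob(x)
    print(torch.allclose(manual, reference, atol=1e-6))  # expected: True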
Example #9
 def forward(self, z):
     assert torch.min(z) >= 0 and torch.max(
         z
     ) <= 1, f'input must be in [0,1] not [{z.min().data.item()}, {z.max().data.item()}]'
     z = torch.clamp(z, self.eps, 1 - self.eps)
     x = (1 / self.temperature) * (torch.log(z) - torch.log1p(-z))
     ldj = sum_except_batch(-torch.log(z) - torch.log(1.0 - z))
     return x, ldj
Example #10
 def forward(self, x):
     '''
     z = softplus(x) = log(1+exp(x))
     ldj = log(dsoftplus(x)/dx) = log(1/(1+exp(-x))) = log(sigmoid(x))
     '''
     z = F.softplus(x)
     ldj = sum_except_batch(F.logsigmoid(x))
     return z, ldj
Example #11
 def forward(self, x):
     s = (x[:, self.element].sign() + 1) / 2
     z = x
     z[:, self.element] = x[:, self.element].abs()
     logit_pi = self.classifier(z)
     ldj = sum_except_batch(
         -F.binary_cross_entropy_with_logits(logit_pi, s, reduction='none'))
     return z, ldj
Example #12
    def forward(self, x):
        assert torch.min(x) >= 0 and torch.max(x) <= 1, 'x must be in [0,1]'
        x = torch.clamp(x, self.eps, 1 - self.eps)

        z = (1 / self.temperature) * (torch.log(x) - torch.log1p(-x))
        ldj = -sum_except_batch(
            torch.log(self.temperature) - F.softplus(-self.temperature * z) -
            F.softplus(self.temperature * z))
        return z, ldj
Example #13
 def _elementwise_forward(self, x, elementwise_params):
     assert elementwise_params.shape[-1] == self._output_dim_multiplier()
     unnormalized_widths, unnormalized_heights = elementwise_params[
         ..., :self.num_bins], elementwise_params[..., self.num_bins:]
     z, ldj_elementwise = splines.quadratic_spline(
         x,
         unnormalized_widths=unnormalized_widths,
         unnormalized_heights=unnormalized_heights,
         inverse=False)
     ldj = sum_except_batch(ldj_elementwise)
     return z, ldj
Example #14
    def _elementwise_forward(self, x, elementwise_params):
        assert elementwise_params.shape[-1] == self._output_dim_multiplier()
        unconstrained_scale, shift, logit_weights, means, log_scales = get_flowpp_params(elementwise_params, num_mixtures=self.num_mixtures)
        scale = self.scale_fn(unconstrained_scale)
        log_scales = log_scales.clamp(min=-7)  # From the code in original Flow++ paper

        x, ldj_elementwise = logistic_mixture_transform(inputs=x,
                                                        logit_weights=logit_weights,
                                                        means=means,
                                                        log_scales=log_scales,
                                                        eps=self.eps,
                                                        max_iters=self.max_iters,
                                                        inverse=False)

        # affine transformation
        z = scale * x + shift
        logistic_ldj = sum_except_batch(ldj_elementwise)
        scale_ldj = sum_except_batch(torch.log(scale))
        ldj = logistic_ldj + scale_ldj
        return z, ldj
Example #15
 def forward(self, x):
     '''
     z = softplus_inv(x) = log(exp(x)-1) = x + log(1-exp(-x))
     ldj = log(dsoftplus_inv(x)/dx)
         = log(exp(x)/(exp(x)-1))
         = log(1/(1-exp(-x)))
         = -log(1-exp(-x))
     '''
     xc = x.clamp(self.eps)
     z = xc + torch.log1p(-torch.exp(-xc))
     ldj = -sum_except_batch(torch.log1p(-torch.exp(-xc)))
     return z, ldj
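Example #15 is the inverse of the softplus map from Example #10, using log(exp(x) - 1) = x + log(1 - exp(-x)) for numerical stability. A standalone round-trip check (sketch):

    import torch
    import torch.nn.functional as F

    x = torch.rand(6) + 0.5                  # positive inputs, away from the clamp region
    z = F.softplus(x)                        # forward map (Example #10)
    x_rec = z + torch.log1p(-torch.exp(-z))  # inverse map (this example)
    print(torch.allclose(x_rec, x, atol=1e-5))  # expected: True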
Example #16
    def forward(self, x):
        assert torch.min(x) >= 0 and torch.max(x) <= 1, 'x must be in [0,1]'
        #x = torch.clamp(x, self.eps, 1 - self.eps)
        x = self.eps + (1.0 - 2.0 * self.eps) * x

        z = (1.0 / self.temperature) * (torch.log(x) - torch.log1p(-x))
        ldj = -sum_except_batch(
            torch.log(self.temperature) - F.softplus(-self.temperature * z) -
            F.softplus(self.temperature * z))
        #ldj = - sum_except_batch(torch.log(x - x * x) + math.log(1.0 - 2.0 * self.eps))

        return z, ldj
Example #17
 def _elementwise_forward(self, x, elementwise_params):
     assert elementwise_params.shape[-1] == self._output_dim_multiplier()
     unnormalized_widths = elementwise_params[..., :self.num_bins]
     unnormalized_heights = elementwise_params[..., self.num_bins:2*self.num_bins]
     unnorm_derivatives_left = elementwise_params[..., 2*self.num_bins:2*self.num_bins+1]
     unnorm_derivatives_right = elementwise_params[..., 2*self.num_bins+1:]
     z, ldj_elementwise = splines.cubic_spline(x,
                                               unnormalized_widths=unnormalized_widths,
                                               unnormalized_heights=unnormalized_heights,
                                               unnorm_derivatives_left=unnorm_derivatives_left,
                                               unnorm_derivatives_right=unnorm_derivatives_right,
                                               inverse=False)
     ldj = sum_except_batch(ldj_elementwise)
     return z, ldj
Example #18
    def _elementwise(self, inputs, elementwise_params, inverse):
        assert elementwise_params.shape[-1] == self._output_dim_multiplier()

        logit_weights, means, log_scales = get_mixture_params(elementwise_params, num_mixtures=self.num_mixtures)

        x = logistic_mixture_transform(inputs=inputs,
                                       logit_weights=logit_weights,
                                       means=means,
                                       log_scales=log_scales,
                                       eps=self.eps,
                                       max_iters=self.max_iters,
                                       inverse=inverse)

        if inverse:
            return x
        else:
            z, ldj_elementwise = x
            ldj = sum_except_batch(ldj_elementwise)
            return z, ldj
Example #19
 def log_prob(self, x, context, should_sum=True, feedback=None):
     mean, std = self.cond_dist_params(context, feedback=feedback)
     dist = Normal(torch.zeros(mean.shape, device=mean.device), torch.ones(std.shape, device=std.device))
     adjusted_x = (x - mean) / std
     adjusted_a = (0 - mean) / std
     log_gx = dist.log_prob(adjusted_x)
     log_c = ((1 - dist.cdf(adjusted_a)) * std).log()
     log_prob = log_gx - log_c
     # return sum_except_batch(dist.log_prob((x - mean).abs()))
     '''
     # Folded normal distribution
     mean, std = self.cond_dist_params(context)
     dist1 = Normal(mean, std)
     dist2 = Normal(-mean, std)
     log_prob = (dist1.log_prob(x).exp() + dist2.log_prob(x).exp()).log()
     '''
     if should_sum:
         return sum_except_batch(log_prob)
     else:
         return log_prob
Example #20
 def log_prob(self, x):
     log_base = -0.5 * math.log(2 * math.pi) - self.log_scale
     log_inner = -0.5 * torch.exp(-2 * self.log_scale) * ((x - self.loc) ** 2)
     return sum_except_batch(log_base + log_inner)
Example #21
 def forward(self, x):
     x = self.temperature * x
     z = torch.sigmoid(x)
     ldj = sum_except_batch(
         torch.log(self.temperature) - F.softplus(-x) - F.softplus(x))
     return z, ldj
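Example #21 is a tempered sigmoid; its log-det-Jacobian log T - softplus(-Tx) - softplus(Tx) is just the log of the sigmoid derivative times the temperature. A quick autograd check (sketch, temperature value chosen arbitrarily):

    import math
    import torch
    import torch.nn.functional as F

    temperature = 1.5
    x = torch.randn(8, requires_grad=True)
    z = torch.sigmoid(temperature * x)
    ldj_analytic = (math.log(temperature)
                    - F.softplus(-temperature * x) - F.softplus(temperature * x))
    dz_dx = torch.autograd.grad(z.sum(), x)[0]
    print(torch.allclose(ldj_analytic, dz_dx.log(), atol=1e-4))  # expected: True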
Example #22
 def forward(self, x):
     z = torch.tanh(x)
     ldj = torch.log(1 - z**2)
     ldj = sum_except_batch(ldj)
     return z, ldj
Example #23
 def forward(self, x):
     z = (x + self.alpha * (torch.sqrt(1 + x**2) - 1)) / (1 + self.alpha)
     ldj = torch.log(1 + self.alpha * x /
                     torch.sqrt(1 + x**2)) - math.log(1 + self.alpha)
     ldj = sum_except_batch(ldj)
     return z, ldj
Example #24
 def forward(self, x):
     z = F.leaky_relu(x, negative_slope=self.negative_slope)
     mask = x < 0
     ldj = self.log_negative_slope * mask.float()
     ldj = sum_except_batch(ldj)
     return z, ldj
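Example #24 uses the constant log-slope on the negative part as the log-det-Jacobian; the transform is trivially invertible by dividing the negative part back out (sketch, slope value chosen arbitrarily):

    import torch
    import torch.nn.functional as F

    negative_slope = 0.1
    x = torch.randn(5)
    z = F.leaky_relu(x, negative_slope=negative_slope)
    x_rec = torch.where(z < 0, z / negative_slope, z)  # hypothetical inverse
    print(torch.allclose(x_rec, x))  # expected: True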
Example #25
 def log_prob(self, x):
     log_base = -0.5 * math.log(2 * math.pi) - math.log(self.sigma)
     log_inner = - 0.5 * (x / self.sigma)**2
     return sum_except_batch(log_base+log_inner)
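Example #25 is the elementwise log-density of a zero-mean isotropic Gaussian; it can be cross-checked against torch.distributions.Normal (sketch, sigma chosen arbitrarily):

    import math
    import torch
    from torch.distributions import Normal

    sigma = 0.7
    x = torch.randn(3, 4)
    manual = -0.5 * math.log(2 * math.pi) - math.log(sigma) - 0.5 * (x / sigma)**2
    reference = Normal(loc=0.0, scale=sigma).log_prob(x)
    print(torch.allclose(manual, reference, atol=1e-6))  # expected: True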
Example #26
 def log_prob_gradient(self, x):
     log_inner = - 0.5 * x**2
     return sum_except_batch(log_inner)
Example #27
 def log_prob_with_mask(self, x, mask):
     log_base =  - 0.5 * math.log(2 * math.pi)
     log_inner = - 0.5 * (x * mask)**2
     return sum_except_batch(log_base+log_inner)
Example #28
 def log_prob_with_mask(self, x, mask):
     log_base = - 0.5 * math.log(2 * math.pi) - self.log_scale.unsqueeze(0).expand(x.shape[0], -1) * mask
     log_inner = - 0.5 * torch.exp(-2 * self.log_scale.unsqueeze(0).expand(x.shape[0], -1) * mask) \
         * ((x * mask - self.loc.unsqueeze(0).expand(x.shape[0], -1) * mask) ** 2)
     return sum_except_batch(log_base + log_inner)
Example #29
 def sample_with_log_prob(self, context, feedback=None):
     dist = self.cond_dist(context, feedback=feedback)
     z = dist.sample()
     log_prob = dist.log_prob(z)
     log_prob = sum_except_batch(log_prob)
     return z.long(), log_prob
Example #30
 def log_prob(self, x, context, feedback=None):
     dist = self.cond_dist(context, feedback=feedback)
     return sum_except_batch(dist.log_prob(x.float()))