Example #1
 def transform(self, xb, params, inverse=False):
     # Split the conditioner output into elementwise shift and scale params.
     shift, scale = chunk_two(params)
     # The +2.0 bias starts the sigmoid near 1 (close to the identity map);
     # the 1e-3 floor keeps the scale strictly positive, hence invertible.
     scale = torch.sigmoid(scale + 2.0) + 1e-3
     if not inverse:
         # Forward: y = scale * x + shift; log|det J| = sum log(scale).
         return scale * xb + shift, sumeb(scale.log())
     else:
         # Inverse undoes the affine map; the log-determinant flips sign.
         return (xb - shift) / scale, -sumeb(scale.log())
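
A minimal round-trip sketch of this affine transform. The helper definitions below are assumptions about the surrounding repo: chunk_two is taken to split the last dimension in half and sumeb to sum over every dimension except the batch dimension; their real definitions are not shown in these examples.

 import torch

 def chunk_two(t):                            # assumed: split last dim in half
     return torch.chunk(t, 2, dim=-1)

 def sumeb(t):                                # assumed: sum over all but batch
     return t.reshape(t.shape[0], -1).sum(-1)

 def transform(xb, params, inverse=False):
     shift, scale = chunk_two(params)
     scale = torch.sigmoid(scale + 2.0) + 1e-3
     if not inverse:
         return scale * xb + shift, sumeb(scale.log())
     return (xb - shift) / scale, -sumeb(scale.log())

 xb = torch.randn(4, 8)
 params = torch.randn(4, 16)                  # conditioner output, 2x features
 y, ld_fwd = transform(xb, params)
 x_rec, ld_inv = transform(y, params, inverse=True)
 assert torch.allclose(x_rec, xb, atol=1e-5)  # inverse recovers the input
 assert torch.allclose(ld_fwd, -ld_inv)       # log-determinants cancel
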
Example #2
 def get_params(self, x, context=None):
     # Embed the conditioning input.
     h = self.linear(x)
     # Optionally add context features when a context network is present.
     if context is not None and self.ctx_linear is not None:
         h = h + self.ctx_linear(context)
     h = self.mlp(h)
     # Split into shift and scale; softplus + 1e-5 keeps the scale positive.
     shift, scale = chunk_two(h)
     scale = F.softplus(scale) + 1e-5
     return shift, scale
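
A quick standalone check of the scale parameterization used above (a sketch, using nothing beyond torch itself): softplus maps any real value to a positive one, and the 1e-5 floor keeps the scale bounded away from zero even for very negative raw outputs, while for large raw values softplus is nearly linear, so large scales remain expressible (unlike a saturating sigmoid).

 import torch
 import torch.nn.functional as F

 raw = torch.tensor([-20.0, -2.0, 0.0, 2.0, 20.0])
 scale = F.softplus(raw) + 1e-5
 assert (scale > 0).all()                     # strictly positive everywhere
 assert abs(scale[-1] - 20.0) < 1e-3          # near-linear for large inputs
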
Example #3
 def log_prob(self, x, context=None):
     if self.use_context and context is not None:
         # Context-conditional Gaussian: predict mu and sigma from context.
         mu, sigma = chunk_two(self.context_enc(context))
         sigma = F.softplus(sigma) + 1e-5
         # Standardize, then apply the change of variables:
         # log p(x) = log p_unit(eps) - log(sigma), summed over event dims.
         eps = (x - mu) / sigma
         return sumeb(self.unit_log_prob(eps) - sigma.log())
     else:
         # Unconditional case: x is scored directly under the unit density.
         return sumeb(self.unit_log_prob(x))
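
The context branch is the standard Gaussian change of variables: with eps = (x - mu) / sigma, log p(x) = log p_unit(eps) - sum(log sigma). A sketch cross-checking this against torch.distributions.Normal, under the assumptions that unit_log_prob is the standard-normal log density and sumeb sums over non-batch dimensions:

 import math
 import torch
 from torch.distributions import Normal

 def unit_log_prob(eps):                      # assumed: standard-normal density
     return -0.5 * (eps ** 2 + math.log(2 * math.pi))

 def sumeb(t):                                # assumed: sum over all but batch
     return t.reshape(t.shape[0], -1).sum(-1)

 x = torch.randn(4, 8)
 mu = torch.randn(4, 8)
 sigma = torch.rand(4, 8) + 0.1
 eps = (x - mu) / sigma
 lp = sumeb(unit_log_prob(eps) - sigma.log())
 ref = Normal(mu, sigma).log_prob(x).reshape(4, -1).sum(-1)
 assert torch.allclose(lp, ref, atol=1e-5)    # matches the reference density
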
Example #4
 def get_params(self, x, context=None):
     if x.dim() == 4:
         # Image input (B, C, H, W): broadcast per-channel parameters.
         log_scale = self.log_scale.view(1, -1, 1, 1)
         shift = self.shift.view(1, -1, 1, 1)
         if context is not None and self.linear is not None:
             # Context shifts both parameters, per sample.
             ctx_log_scale, ctx_shift = chunk_two(self.linear(context))
             B = x.shape[0]
             log_scale = log_scale + ctx_log_scale.view(B, -1, 1, 1)
             shift = shift + ctx_shift.view(B, -1, 1, 1)
         # Each of the H * W positions shares the per-channel scale,
         # so the log-determinant picks up an H * W factor.
         logdet = x.shape[-2] * x.shape[-1] * sumeb(log_scale)
     else:
         # Flat input (B, D): per-dimension parameters.
         log_scale = self.log_scale.view(1, -1)
         shift = self.shift.view(1, -1)
         if context is not None and self.linear is not None:
             ctx_log_scale, ctx_shift = chunk_two(self.linear(context))
             B = x.shape[0]
             log_scale = log_scale + ctx_log_scale.view(B, -1)
             shift = shift + ctx_shift.view(B, -1)
         logdet = sumeb(log_scale)
     return log_scale, shift, logdet
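
In the image branch every one of the H * W spatial positions shares the same per-channel scale, so the diagonal Jacobian repeats each exp(log_scale[c]) that many times and the log-determinant gains an H * W factor. A numerical sketch for the flat branch, assuming the transform applied with these parameters is y = exp(log_scale) * x + shift:

 import torch

 D = 5
 log_scale = torch.randn(D)
 shift = torch.randn(D)

 def f(x):                                    # assumed elementwise actnorm map
     return x * log_scale.exp() + shift

 x = torch.randn(D)
 J = torch.autograd.functional.jacobian(f, x)     # D x D, diagonal
 sign, logdet_num = torch.linalg.slogdet(J)
 assert torch.allclose(logdet_num, log_scale.sum(), atol=1e-4)
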
Example #5
 def mean(self, context=None, device='cpu'):
     if self.use_context and context is not None:
         # The mean is predicted from the context; its log-density is the
         # unit density at eps = 0, corrected by -log(sigma).
         mu, sigma = chunk_two(self.context_enc(context))
         sigma = F.softplus(sigma) + 1e-5
         lp = sumeb(self.unit_log_prob(torch.zeros_like(mu)) - sigma.log())
         return mu, lp
     else:
         # Unconditional case: the mean of the unit base density is zero.
         mu = torch.zeros(self.infer_shape(), device=device)
         lp = sumeb(self.unit_log_prob(mu))
         return mu, lp
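
Note that mean returns the log-density evaluated at the mean itself (eps = 0 in the context branch). For a Gaussian the mean is also the mode, so this value upper-bounds the log-density of any sample; a tiny sketch, assuming unit_log_prob is the standard-normal log density:

 import math
 import torch

 def unit_log_prob(eps):                      # assumed: standard-normal density
     return -0.5 * (eps ** 2 + math.log(2 * math.pi))

 mu = torch.zeros(4, 8)
 lp_mean = unit_log_prob(torch.zeros_like(mu)).reshape(4, -1).sum(-1)
 lp_sample = unit_log_prob(torch.randn(4, 8)).reshape(4, -1).sum(-1)
 assert (lp_mean >= lp_sample).all()          # the mean is also the mode
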
Example #6
 def sample(self, num_samples=None, context=None, device='cpu'):
     if self.use_context and context is not None:
         # Reparameterized draw: x = mu + sigma * eps with eps ~ N(0, I),
         # scored with the matching change of variables.
         mu, sigma = chunk_two(self.context_enc(context))
         sigma = F.softplus(sigma) + 1e-5
         eps = torch.randn_like(mu)
         x = mu + sigma * eps
         lp = sumeb(self.unit_log_prob(eps) - sigma.log())
         return x, lp
     else:
         # Unconditional draw straight from the unit base density.
         eps = torch.randn(self.infer_shape(num_samples), device=device)
         lp = sumeb(self.unit_log_prob(eps))
         return eps, lp
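
sample and log_prob (Example #3) are mutually consistent: the log-probability returned alongside a reparameterized draw x = mu + sigma * eps equals what log_prob would recompute for that x. A sketch under the same assumptions about unit_log_prob and sumeb as above:

 import math
 import torch

 def unit_log_prob(eps):
     return -0.5 * (eps ** 2 + math.log(2 * math.pi))

 def sumeb(t):
     return t.reshape(t.shape[0], -1).sum(-1)

 mu, sigma = torch.randn(4, 8), torch.rand(4, 8) + 0.1

 eps = torch.randn_like(mu)                   # reparameterized draw
 x = mu + sigma * eps
 lp_at_sample = sumeb(unit_log_prob(eps) - sigma.log())

 eps_rec = (x - mu) / sigma                   # what log_prob would recompute
 lp_rescored = sumeb(unit_log_prob(eps_rec) - sigma.log())
 assert torch.allclose(lp_at_sample, lp_rescored, atol=1e-5)
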
Example #7
 def forward(self, x, context=None):
     B = x.shape[0]
     logdet = 0
     finished = []
     # Multi-scale pass: after every transform but the last, split off half
     # of the variables and send them straight to the output.
     for i, transform in enumerate(self.transforms[:-1]):
         x, logdet_ = transform(x, context)
         logdet = logdet + logdet_
         x_, x = chunk_two(x)
         assert x_.shape[1:] == self.output_shapes[i]
         finished.append(x_.view(B, -1))

     # The last transform acts on the remaining variables only.
     x, logdet_ = self.transforms[-1](x, context)
     logdet = logdet + logdet_
     finished.append(x.view(B, -1))
     # Re-assemble all split-off pieces into one flat output.
     x = torch.cat(finished, -1)
     return x, logdet
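
This is the multi-scale pattern from RealNVP/Glow: after every transform except the last, half of the variables are split off and routed directly to the output, so later transforms work on progressively fewer dimensions while the total dimensionality is preserved. A shape-only sketch with identity transforms standing in for the real ones (all names illustrative):

 import torch

 def chunk_two(t):                            # assumed: split last dim in half
     return torch.chunk(t, 2, dim=-1)

 def forward(x, num_levels=3):
     B, finished = x.shape[0], []
     for _ in range(num_levels - 1):          # identity stands in per level
         x_, x = chunk_two(x)
         finished.append(x_.reshape(B, -1))
     finished.append(x.reshape(B, -1))
     return torch.cat(finished, -1)

 x = torch.randn(2, 16)
 out = forward(x)
 assert out.shape == x.shape                  # total dimensionality preserved
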
Example #8
 def get_params(self, X):
     # Context-free variant of Example #2: one MLP produces both halves.
     H = self.mlp(X)
     shift, scale = chunk_two(H)
     scale = F.softplus(scale) + 1e-5
     return shift, scale
Example #9
 def inverse(self, x, context=None):
     # Split; xa passes through unchanged, so the conditioner's parameters
     # can be recomputed exactly, which makes the coupling layer invertible.
     xa, xb = chunk_two(x)
     xb, logdet = self.transform(xb, self.net(xa, context), inverse=True)
     return cat_two(xa, xb), logdet
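
Invertibility of the coupling layer hinges on the conditioner net seeing only xa, which passes through unchanged: at inversion time the exact same params can be recomputed from xa. A sketch combining this with the affine transform of Example #1, where chunk_two/cat_two are assumed to split and re-join the last dimension and the toy linear conditioner is illustrative:

 import torch

 def chunk_two(t):
     return torch.chunk(t, 2, dim=-1)

 def cat_two(a, b):
     return torch.cat([a, b], dim=-1)

 def sumeb(t):
     return t.reshape(t.shape[0], -1).sum(-1)

 net = torch.nn.Linear(4, 8)                  # toy conditioner: xa -> params

 def transform(xb, params, inverse=False):
     shift, scale = chunk_two(params)
     scale = torch.sigmoid(scale + 2.0) + 1e-3
     if not inverse:
         return scale * xb + shift, sumeb(scale.log())
     return (xb - shift) / scale, -sumeb(scale.log())

 def coupling(x, inverse=False):
     xa, xb = chunk_two(x)
     xb, logdet = transform(xb, net(xa), inverse=inverse)
     return cat_two(xa, xb), logdet

 x = torch.randn(4, 8)
 y, _ = coupling(x)
 x_rec, _ = coupling(y, inverse=True)
 assert torch.allclose(x_rec, x, atol=1e-5)   # full layer round-trips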