def map_x_to_distribution(self,
                          x: torch.Tensor) -> distributions.Distribution:
    """Turn the raw network output ``x`` into a (rescaled) distribution.

    The last two channels of ``x`` hold the affine rescaling parameters
    (loc, scale); everything before them is the hidden state fed to the
    PICNN-based base distribution.
    """
    base = self.distribution_class(
        picnn=self.picnn,
        hidden_state=x[..., :-2],
        prediction_length=self.prediction_length,
        is_energy_score=self.is_energy_score,
        es_num_samples=self.es_num_samples,
        beta=self.beta,
    )
    # Rescale back to the original data range.
    rescale = distributions.AffineTransform(loc=x[..., -2][:, None],
                                            scale=x[..., -1][:, None])
    transforms = [rescale]
    if self._transformation is not None:
        # Undo the normalizer's forward transformation as well.
        transforms.append(
            TorchNormalizer.get_transform(self._transformation)["inverse_torch"])
    return self.transformed_distribution_class(base, transforms)
Example #2
0
def logistic_distribution(loc: Tensor, scale: Tensor):
    """Return a Logistic(loc, scale) distribution.

    A standard logistic variable is the logit (inverse sigmoid) of a
    Uniform(0, 1) sample; the affine transform then applies loc/scale.

    Args:
        loc: location tensor of the logistic distribution.
        scale: scale tensor of the logistic distribution.

    Returns:
        A ``td.TransformedDistribution`` behaving as Logistic(loc, scale).
    """
    # BUG FIX: the upper bound must be 1, not 0 — ``Uniform(0, 0)`` has an
    # empty support and raises under torch's default argument validation.
    base_distribution = td.Uniform(loc.new_zeros(1), scale.new_ones(1))
    transforms = [
        td.SigmoidTransform().inv,
        td.AffineTransform(loc=loc, scale=scale),
    ]
    return td.TransformedDistribution(base_distribution, transforms)
 def __init__(self, base_distribution, low, high, **kwargs):
     """Squash ``base_distribution`` onto the interval (low, high).

     Composes tanh (R -> (-1, 1)) with an affine map ((-1, 1) -> (low, high)).
     """
     half_range = (high - low) / 2
     midpoint = (high + low) / 2
     shift = dists.AffineTransform(loc=midpoint,
                                   scale=half_range,
                                   cache_size=1,
                                   event_dim=1)
     squash = TanhTransform(cache_size=1)
     super().__init__(base_distribution, [squash, shift], **kwargs)
Example #4
0
 def __init__(self, loc, scale, **kwargs):
     """Logistic(loc, scale) built as a logit-transformed Uniform(0, 1)."""
     loc = torch.as_tensor(loc)
     scale = torch.as_tensor(scale)
     uniform = ptd.Uniform(torch.zeros_like(loc),
                           torch.ones_like(loc), **kwargs)
     chain = [
         ptd.SigmoidTransform().inv,
         ptd.AffineTransform(loc=loc, scale=scale),
     ]
     super().__init__(uniform, chain)
def logistic_distribution(loc, log_scale):
    """Return a Logistic(loc, exp(log_scale)) distribution.

    The scale is parameterised in log-space; a small epsilon keeps it
    strictly positive after exponentiation.
    """
    scale = torch.exp(log_scale) + 1e-5
    uniform = distributions.Uniform(torch.zeros_like(loc),
                                    torch.ones_like(loc))
    chain = [
        LogisticTransform(),
        distributions.AffineTransform(loc=loc, scale=scale),
    ]
    return distributions.TransformedDistribution(uniform, chain)
 def map_x_to_distribution(self, x: torch.Tensor) -> distributions.Normal:
     """Build a rescaled Normal from network output ``x``.

     Channels 2/3 parameterise the base distribution; channels 0/1 carry
     the affine rescaling (loc, scale).
     """
     base = self.distribution_class(loc=x[..., 2], scale=x[..., 3])
     rescale = distributions.AffineTransform(loc=x[..., 0], scale=x[..., 1])
     transforms = [rescale]
     if self._transformation is not None:
         # Also invert the normalizer's forward transformation.
         transforms.append(
             TorchNormalizer.get_transform(self._transformation)["inverse_torch"])
     return distributions.TransformedDistribution(base, transforms)
    def __init__(self, loc: torch.Tensor, scale: torch.Tensor):
        """Logistic(loc, scale) as a logit-transformed Uniform(0, 1)."""
        self.loc, self.scale = dist.utils.broadcast_all(loc, scale)

        # Endpoints 0 and 1 on the same device/dtype as ``loc``.
        endpoints = torch.Tensor([0, 1]).type_as(loc)
        zero, one = endpoints

        uniform = dist.Uniform(zero, one).expand(self.loc.shape)
        chain = [
            dist.SigmoidTransform().inv,
            dist.AffineTransform(loc=self.loc, scale=self.scale),
        ]

        super(Logistic, self).__init__(uniform, chain)
 def map_x_to_distribution(self, x: torch.Tensor) -> distributions.Normal:
     """Build a rescaled low-rank multivariate distribution from ``x``.

     Channel layout after the permute: 0/1 = affine loc/scale, 2 = loc,
     3 = diagonal covariance, 4: = low-rank covariance factors.
     """
     # Move batch dimension second so the event dimension comes last.
     x = x.permute(1, 0, 2)
     base = self.distribution_class(
         loc=x[..., 2],
         cov_factor=x[..., 4:],
         cov_diag=x[..., 3],
     )
     rescale = distributions.AffineTransform(loc=x[0, :, 0],
                                             scale=x[0, :, 1],
                                             event_dim=1)
     transforms = [rescale]
     if self._transformation is not None:
         # Also invert the normalizer's forward transformation.
         transforms.append(
             TorchNormalizer.get_transform(self._transformation)["inverse_torch"])
     return distributions.TransformedDistribution(base, transforms)
Example #9
0
 def __init__(self,
              locs: torch.Tensor,
              log_scales: torch.Tensor,
              log_weights: torch.Tensor,
              mean_log_inter_time: float = 0.0,
              std_log_inter_time: float = 1.0):
     """Mixture of log-normals over inter-event times.

     Builds a Gaussian mixture in log-time, optionally undoes the
     log-time standardisation, then exponentiates into time space.
     """
     self.mean_log_inter_time = mean_log_inter_time
     self.std_log_inter_time = std_log_inter_time

     weights = D.Categorical(logits=log_weights)
     components = Normal(loc=locs, scale=log_scales.exp())
     gmm = MixtureSameFamily(weights, components)

     # De-standardise log-times only when the normalisation is non-trivial.
     if mean_log_inter_time == 0.0 and std_log_inter_time == 1.0:
         transforms = []
     else:
         transforms = [
             D.AffineTransform(loc=mean_log_inter_time,
                               scale=std_log_inter_time)
         ]
     transforms.append(D.ExpTransform())
     super().__init__(gmm, transforms)
            samples = samples.view((batch_size, ) + self.final_shape)
            return self.inverse(samples, max_iter=max_iter)

    def set_num_terms(self, n_terms):
        """Set the series-truncation length on every layer of every block."""
        for blk in self.stack:
            for lyr in blk.stack:
                lyr.numSeriesTerms = n_terms


if __name__ == "__main__":
    # Smoke test: the two constructions below should define the same
    # standard logistic distribution.
    loc = 0.
    scale = 1.
    base_distribution = distributions.Uniform(0., 1.)

    # Logistic via the inverse-sigmoid (logit) transform.
    logistic_1 = distributions.TransformedDistribution(
        base_distribution,
        [distributions.SigmoidTransform().inv,
         distributions.AffineTransform(loc=loc, scale=scale)])

    # Same distribution via the custom LogisticTransform.
    logistic_2 = distributions.TransformedDistribution(
        base_distribution,
        [LogisticTransform(),
         distributions.AffineTransform(loc=loc, scale=scale)])

    x = torch.zeros(2)
    print(logistic_1.log_prob(x), logistic_2.log_prob(x))
    1 / 0  # deliberate abort after the smoke test
Example #11
0
            # only send batch_size to prior, prior has final_shape as attribute
            samples = self.prior().rsample((batch_size,))
            samples = samples.view((batch_size,) + self.final_shape)
            return self.inverse(samples, max_iter=max_iter)

    def set_num_terms(self, n_terms):
        """Propagate the series-truncation length to each layer in each block."""
        for current_block in self.stack:
            for current_layer in current_block.stack:
                current_layer.numSeriesTerms = n_terms


if __name__ == "__main__":
    # Smoke test comparing two equivalent logistic constructions.
    loc = 0.
    scale = 1.
    base_distribution = distributions.Uniform(0., 1.)

    logistic_1 = distributions.TransformedDistribution(
        base_distribution,
        [distributions.SigmoidTransform().inv,
         distributions.AffineTransform(loc=loc, scale=scale)])
    logistic_2 = distributions.TransformedDistribution(
        base_distribution,
        [LogisticTransform(),
         distributions.AffineTransform(loc=loc, scale=scale)])

    x = torch.zeros(2)
    print(logistic_1.log_prob(x), logistic_2.log_prob(x))
    1 / 0  # deliberate abort; everything below is unreachable scratch code

    diff = lambda x, y: (x - y).abs().sum()
    batch_size = 13
    channels = 3
    h, w = 32, 32
    in_shape = (batch_size, channels, h, w)
    x = torch.randn((batch_size, channels, h, w), requires_grad=True)
Example #12
0
def sample(args):
    """
    Performs the following:
    1. construct model object & load state dict from saved model;
    2. make H x W samples from a set of gaussian or logistic prior on the latent space;
    3. save to disk as a grid of images.

    Args:
        args: parsed CLI namespace with fields `dataset`, `nhidden`,
            `nlayers`, `model_path`, `prior`, `nrows`, `ncols`,
            `save_image_path`.

    Raises:
        NotImplementedError: for the unsupported 'tfd' dataset.
        ValueError: if `args.dataset` or `args.prior` is unrecognised.
    """
    # parse settings:
    # dataset -> (input_dim, height, width, depth). A lookup table replaces
    # the original chain of independent `if`s, which left input_dim & co.
    # unbound (NameError later) when the dataset name did not match; the
    # dead assignments after the 'tfd' raise are also dropped.
    dataset_shapes = {
        'mnist': (28 * 28, 28, 28, 1),
        'svhn': (32 * 32 * 3, 32, 32, 3),
        'cifar10': (32 * 32 * 3, 32, 32, 3),
    }
    if args.dataset == 'tfd':
        raise NotImplementedError(
            "[sample] Toronto Faces Dataset unsupported right now. Sorry!")
    if args.dataset not in dataset_shapes:
        raise ValueError("[sample] Unknown dataset: {0}".format(args.dataset))
    input_dim, img_height, img_width, img_depth = dataset_shapes[args.dataset]

    # shut off gradients for sampling:
    torch.set_grad_enabled(False)

    # build model & load state dict:
    nice = NICEModel(input_dim, args.nhidden, args.nlayers)
    if args.model_path is not None:
        nice.load_state_dict(torch.load(args.model_path, map_location='cpu'))
        print("[sample] Loaded model from file.")
    nice.eval()

    # sample a batch:
    if args.prior == 'logistic':
        LOGISTIC_LOC = 0.0
        LOGISTIC_SCALE = (3. / (np.pi**2))  # (sets variance to 1)
        logistic = dist.TransformedDistribution(dist.Uniform(0.0, 1.0), [
            dist.SigmoidTransform().inv,
            dist.AffineTransform(loc=LOGISTIC_LOC, scale=LOGISTIC_SCALE)
        ])
        print(
            "[sample] sampling from logistic prior with loc={0:.4f}, scale={1:.4f}."
            .format(LOGISTIC_LOC, LOGISTIC_SCALE))
        ys = logistic.sample(torch.Size([args.nrows * args.ncols, input_dim]))
        xs = nice.inverse(ys)
    elif args.prior == 'gaussian':
        print("[sample] sampling from gaussian prior with loc=0.0, scale=1.0.")
        ys = torch.randn(args.nrows * args.ncols, input_dim)
        xs = nice.inverse(ys)
    else:
        # Previously an unknown prior left `xs` unbound and crashed below
        # with a NameError; fail fast with a clear message instead.
        raise ValueError("[sample] Unknown prior: {0}".format(args.prior))

    # format sample into images of correct shape:
    image_batch = unflatten_images(xs, img_depth, img_height, img_width)

    # arrange into a grid and save to file:
    torchvision.utils.save_image(image_batch,
                                 args.save_image_path,
                                 nrow=args.nrows)
    print("[sample] Saved {0}-by-{1} sampled images to {2}.".format(
        args.nrows, args.ncols, args.save_image_path))