Example #1
 def test_reconstruction(self,
                         epoch,
                         beta_kl,
                         beta_mmd,
                         beta_disc,
                         batch_size=256,
                         save_location=None):
     # todo remove betas
     self.eval()
     _, generator_test, _ = self.dataset.data_loaders(batch_size)
     with torch.no_grad():
         for i, (x, m) in enumerate(generator_test):
             x = cuda_variable(x)
             m = cuda_variable(m)
             recon_batch = self.forward(x,
                                        m=m,
                                        beta_kl=beta_kl,
                                        beta_mmd=beta_mmd,
                                        beta_disc=beta_disc,
                                        num_samples=1,
                                        train=False)['samples']
             if i == 0:
                 n = min(x.size(0), 8)
                 comparison = torch.cat(
                     [x[:n],
                      recon_batch.view(batch_size, 1, 28, 28)[:n]])
                 save_image(comparison.cpu(), save_location, nrow=n)
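
All of the snippets on this page call a cuda_variable helper defined elsewhere in VarNet and not shown here. A minimal sketch of what such a helper might look like, assuming it only needs to move a tensor to the GPU when one is available (the volatile keyword accepted in Example #5 below points at an older, Variable-based implementation, so the real signature may differ):

import torch

def cuda_variable(tensor, volatile=False):
    # Hypothetical stand-in for the project's helper: move the tensor to the
    # GPU if CUDA is available. The volatile flag comes from pre-0.4 PyTorch
    # and is simply ignored in this sketch.
    return tensor.cuda() if torch.cuda.is_available() else tensor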
Example #2
    def test_variation_lines(self,
                             num_elements_per_dim=20,
                             save_location=None):
        self.eval()
        batch_size = 1
        _, generator_test, _ = self.dataset.data_loaders(batch_size)

        generator_test_it = iter(generator_test)
        for _ in range(random.randint(1, len(generator_test))):
            x, _ = next(generator_test_it)

        trajectory = []
        x = cuda_variable(x)
        with torch.no_grad():
            z_star, _, _, _ = self.encoder_z.forward(x.detach())
            alpha_it = self.features.alpha_iterator(
                max_num_dimensions=2,
                num_elements_per_dim=num_elements_per_dim)
            for alpha in alpha_it:
                style_vector = self.features.style_vector_from_alpha(
                    alpha=alpha)

                logdet = cuda_variable(torch.zeros(batch_size))

                z, _, _, _ = self.z_star_to_z.forward(
                    z_star=z_star, logdet=logdet, style_vector=style_vector)

                x_pred, samples = self.decoder.forward(z)
                trajectory.append(x_pred)

        trajectory.append(x)
        trajectory = torch.cat(
            [t.view(batch_size, 1, 28, 28) for t in trajectory], 0)

        save_image(trajectory.cpu(), save_location, nrow=num_elements_per_dim)
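
Advancing the loader iterator by hand, as above and in several later examples, is one way to land on a random test batch. A more compact equivalent, assuming the DataLoader supports len() (a sketch, not the project's code):

import itertools
import random

def random_test_batch(generator_test):
    # Draw one batch uniformly at random from the test loader.
    skip = random.randrange(len(generator_test))
    return next(itertools.islice(iter(generator_test), skip, None))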
Example #3
 def random_alpha(self, batch_size):
     rand_label = cuda_variable(
         torch.randint(low=0, high=self.num_values,
                       size=(batch_size, )).long())
     rand_alpha_sigmoid = cuda_variable(
         torch.rand(batch_size, self.num_style_tokens_per_label))
     return rand_label, rand_alpha_sigmoid
Example #4
    def alpha_iterator(self,
                       max_num_dimensions=None,
                       num_elements_per_dim=10,
                       offset=0):
        """

        :param max_num_dimensions: only for sigmoid features; number of swept
            dimensions, the label counting as the first one
        :param num_elements_per_dim: number of grid points per swept dimension
        :param offset: number of leading sigmoid dimensions held fixed at 0.5
        :return: iterator over (label, alpha_sigmoid) pairs, each with batch size 1
        """
        label_iterator = (cuda_variable(torch.Tensor([d]).long())
                          for d in range(self.num_values))

        if max_num_dimensions is None:
            max_num_dimensions = self.num_style_tokens_per_label
        else:
            # label is always the first dimension
            max_num_dimensions = max_num_dimensions - 1
            # todo to remove, investigate why this does not break
            # max_num_dimensions = max_num_dimensions
        g = itertools.product(np.arange(0., 1., 1 / num_elements_per_dim),
                              repeat=max_num_dimensions)
        remaining_dimensions = self.num_style_tokens_per_label - max_num_dimensions

        begin = (0.5, ) * offset
        end = (0.5, ) * (remaining_dimensions - offset)

        alpha_sigmoid_iterator = (cuda_variable(
            torch.Tensor(begin + t + end)).unsqueeze(0) for t in g)

        return itertools.product(label_iterator, alpha_sigmoid_iterator)
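
The iterator above sweeps a regular grid over the swept sigmoid dimensions and pads the remaining ones with 0.5. A standalone toy illustration of the same grid-plus-padding pattern, with made-up sizes and independent of the class:

import itertools
import numpy as np

num_elements_per_dim = 4   # toy grid resolution
swept_dims = 2             # dimensions covered by the grid
padded_dims = 3            # dimensions held fixed at 0.5

grid = itertools.product(np.arange(0., 1., 1 / num_elements_per_dim),
                         repeat=swept_dims)
for point in grid:
    alpha = tuple(float(v) for v in point) + (0.5,) * padded_dims
    print(alpha)  # e.g. (0.0, 0.25, 0.5, 0.5, 0.5)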
Example #5
 def eval_batch(self, i, j):
     """
     Get a batch of images in a range with their attributes.
     """
     assert i < j
     batch_x = normalize_images(self.images[i:j].cuda())
     batch_y = self.attributes[i:j].cuda()
     return (cuda_variable(batch_x, volatile=True),
             cuda_variable(batch_y, volatile=True))
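
The volatile=True flag dates from pre-0.4 PyTorch, where it disabled autograd for inference; current versions achieve the same effect with torch.no_grad(). A hedged rewrite under that assumption, keeping the project's normalize_images and cuda_variable helpers (assumed to accept a plain tensor):

 def eval_batch(self, i, j):
     # Same as above, but with torch.no_grad() instead of volatile=True,
     # which was removed after PyTorch 0.4.
     assert i < j
     with torch.no_grad():
         batch_x = normalize_images(self.images[i:j].cuda())
         batch_y = self.attributes[i:j].cuda()
         return cuda_variable(batch_x), cuda_variable(batch_y)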
Example #6
    def forward(self, x):
        batch_size = x.size(0)
        x = x.view(batch_size, -1)
        context = self.context(x)

        # normalizing flow
        noise = cuda_variable(torch.randn(batch_size, self.z_dim))
        logdet = cuda_variable(torch.zeros(batch_size))
        z_samples, logdet, _ = self.inf((noise, logdet, context))
        return z_samples, logdet, context, noise
Example #7
    def alpha_iterator(self,
                       max_num_dimensions=None,
                       num_elements_per_dim=10,
                       offset=0):
        """

        :param max_num_dimensions: number of alpha dimensions to sweep,
            capped at self.alpha_dim
        :param num_elements_per_dim: number of grid points per swept dimension
        :param offset: number of leading non-swept dimensions held fixed at 0.5
        :return: alphas have a batch_size of 1
        """
        if max_num_dimensions is None:
            max_num_dimensions = self.alpha_dim
        else:
            max_num_dimensions = min(self.alpha_dim, max_num_dimensions)
        g = itertools.product(np.arange(0., 1., 1 / num_elements_per_dim),
                              repeat=max_num_dimensions)
        remaining_dimensions = self.alpha_dim - max_num_dimensions

        begin = (0.5, ) * offset
        end = (0.5, ) * (remaining_dimensions - offset)

        alpha_gen = (cuda_variable(torch.Tensor(begin + t + end)).unsqueeze(0)
                     for t in g)
        return alpha_gen
Example #8
 def alpha(self, x, m):
     # TODO only used for debug
     if isinstance(x, (tuple, list)):
         batch_size = x[0].size(0)
     else:
         batch_size = x.size(0)
     return cuda_variable(torch.zeros(batch_size))
Example #9
 def alpha_iterator(self,
                    max_num_dimensions=None,
                    num_elements_per_dim=None):
     # assert max_num_dimensions is None or max_num_dimensions == 1
     assert max_num_dimensions is None or max_num_dimensions <= self.num_values
     return (cuda_variable(torch.Tensor([d]).long())
             for d in range(self.num_values))
Example #10
    def test_random_variations(self,
                               num_variations=20,
                               num_elements_per_dim=20,
                               save_location=None):
        self.eval()
        batch_size = num_variations
        _, generator_test, _ = self.dataset.data_loaders(batch_size)

        generator_test_it = iter(generator_test)
        for _ in range(random.randint(1, len(generator_test))):
            x, m = next(generator_test_it)

        trajectory = []
        x = cuda_variable(x)
        m = cuda_variable(m)
        with torch.no_grad():
            z_star, _, _, _ = self.encoder_z.forward(x.detach())
            z_star = Normal(torch.zeros_like(z_star),
                            torch.ones_like(z_star)).sample()

            alpha = self.features.random_alpha(batch_size)

            # todo to remove
            # alpha = self.features.random_alpha(1)
            # alpha = alpha.repeat(batch_size, 1)
            # alpha = [alpha[0].repeat(batch_size), alpha[1].repeat(batch_size, 1)]

            style_vector = self.features.style_vector_from_alpha(alpha=alpha)

            logdet = cuda_variable(torch.zeros(batch_size))

            z, _, _, _ = self.z_star_to_z.forward(z_star=z_star,
                                                  logdet=logdet,
                                                  style_vector=style_vector)

            x_pred, samples = self.decoder.forward(z)
            trajectory.append(x_pred)

        # trajectory.append(x)
        trajectory = torch.cat(
            [t.view(batch_size, 1, 28, 28) for t in trajectory], 0)

        save_image(trajectory.cpu(), save_location, nrow=num_elements_per_dim)
Example #11
    def train_batch(self, bs):
        """
        Get a batch of random images with their attributes.
        """
        # image IDs
        idx = torch.LongTensor(bs).random_(len(self.images))

        # select images / attributes
        batch_x = normalize_images(self.images.index_select(0, idx).cuda())
        batch_y = self.attributes.index_select(0, idx).cuda()

        # data augmentation
        if self.v_flip and np.random.rand() <= 0.5:
            batch_x = batch_x.index_select(
                2,
                torch.arange(batch_x.size(2) - 1, -1, -1).long().cuda())
        if self.h_flip and np.random.rand() <= 0.5:
            batch_x = batch_x.index_select(
                3,
                torch.arange(batch_x.size(3) - 1, -1, -1).long().cuda())

        return cuda_variable(batch_x), cuda_variable(batch_y)
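
The two index_select calls build reversed index vectors to flip the height and width axes. On PyTorch >= 0.4 the same augmentation can be written with torch.flip; a sketch of just the augmentation step, assuming batch_x is an (N, C, H, W) image tensor:

# Equivalent flips with torch.flip (dim 2 = height, dim 3 = width).
if self.v_flip and np.random.rand() <= 0.5:
    batch_x = torch.flip(batch_x, dims=[2])
if self.h_flip and np.random.rand() <= 0.5:
    batch_x = torch.flip(batch_x, dims=[3])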
Example #12
    def alpha_iterator(self,
                       max_num_dimensions=None,
                       num_elements_per_dim=10,
                       offset=0):
        """

        :param max_num_dimensions: number of dimensions to sweep, defaults to
            self.num_style_tokens + 1
        :param num_elements_per_dim: number of grid points per swept dimension
        :param offset: number of leading non-swept dimensions held fixed at 0.01
        :return: alphas have a batch_size of 1
        """
        if max_num_dimensions is None:
            max_num_dimensions = self.num_style_tokens + 1
        g = itertools.product(np.arange(0., 1., 1 / num_elements_per_dim),
                              repeat=max_num_dimensions)
        remaining_dimensions = self.num_style_tokens + 1 - max_num_dimensions

        begin = (0.01, ) * offset
        end = (0.01, ) * (remaining_dimensions - offset)

        alpha_gen = (cuda_variable(torch.Tensor(begin + t + end)).unsqueeze(0)
                     for t in g)
        probs_gen = (t / t.sum(1, keepdim=True) for t in alpha_gen)
        return probs_gen
Example #13
File: varnet.py Project: zbxzc35/VarNet
    def forward(self,
                x,
                m,
                train,
                beta_kl,
                beta_mmd,
                num_samples=1,
                beta_disc=1.):
        x, m = self.repeat_(x, m, num_samples=num_samples)

        z_star, logdet_z_star, context, noise = self.encoder_z(x)

        # compute alpha_from_input
        style_vector = self.features(x, m)

        # z knowing true tokens
        z, logdet_z, _, u_z = self.z_star_to_z.forward(
            z_star=z_star, logdet=logdet_z_star, style_vector=style_vector)

        # compute weights
        x_reconstruct, samples = self.decoder.forward(z=z, x=x, train=train)

        # KL on z_star
        zero = cuda_variable(torch.zeros(1))
        logqz_star = utils.log_normal(noise, zero, zero).sum(1) - logdet_z_star
        logpz_star = utils.log_normal(z_star, zero, zero).sum(1)
        kl_star = logqz_star - logpz_star

        # free bits or beta?
        # kl = 0.1 * torch.max(kl, torch.ones_like(kl) * self.bits)

        # KL on z
        zero = cuda_variable(torch.zeros(1))
        logqz = utils.log_normal(noise, zero, zero).sum(1) - logdet_z
        logpz = utils.log_normal(z, zero, zero).sum(1)
        kl = logqz - logpz

        # compute z (prior)
        prior_distribution = Normal(torch.zeros_like(z_star),
                                    torch.ones_like(z_star))
        z_prior = prior_distribution.sample()
        z_star_prior = prior_distribution.sample()

        # mmd on z star
        mmd_z_star = mmd_reg(z_tilde=z_star, z=z_star_prior)

        # mmd on z
        mmd_z = mmd_reg(z_tilde=z, z=z_prior)

        ce = self.ce(value=x_reconstruct, target=x)

        disc_reg = self.disc_reg(z_star=z_star, style_vector=style_vector)

        loss = (
            ce + beta_kl * kl_star
            # + beta_kl * kl
            + beta_mmd * mmd_z
            # + beta_mmd * mmd_z_star
            - beta_disc * disc_reg)

        loss = loss.mean()

        acc = self.accuracy(x_reconstruct, x)

        monitored_quantities = dict(loss=loss.item(),
                                    ce=ce.mean().item(),
                                    kl_star=kl_star.mean().item(),
                                    kl=kl.mean().item(),
                                    disc_reg=disc_reg.mean().item(),
                                    mmd_z=mmd_z.mean().item(),
                                    mmd_z_star=mmd_z_star.mean().item(),
                                    acc=acc * 100)

        return dict(loss=loss,
                    monitored_quantities=monitored_quantities,
                    samples=samples)
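
Both KL terms in this forward pass are Monte-Carlo estimates built from the flow's change-of-variables formula: log q(z) is the base-noise log-density minus the accumulated log-determinant, and log p(z) is a standard-normal log-density. Assuming utils.log_normal(x, zero, zero) is the element-wise standard-normal log-density (mean 0, log-variance 0), a standalone sketch of the same computation is:

import math
import torch

def standard_normal_logpdf(x):
    # Element-wise log N(x; 0, 1); the caller sums over feature dimensions.
    return -0.5 * (x ** 2 + math.log(2 * math.pi))

def flow_kl_estimate(noise, z, logdet):
    # log q(z) via change of variables, log p(z) under a standard-normal prior;
    # returns one KL sample per batch element.
    logqz = standard_normal_logpdf(noise).sum(1) - logdet
    logpz = standard_normal_logpdf(z).sum(1)
    return logqz - logpz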
Example #14
    def test_visualization(self,
                           num_elements_per_dim=20,
                           num_curves=6,
                           save_location=None):
        self.eval()
        batch_size = 1
        _, generator_test, _ = self.dataset.data_loaders(batch_size)

        generator_test_it = iter(generator_test)
        for _ in range(random.randint(1, len(generator_test))):
            x, m = next(generator_test_it)

        z_trajectory = []
        img_trajectory = []
        x_original = cuda_variable(x)
        x = cuda_variable(x.repeat(num_curves, 1, 1, 1))
        m = cuda_variable(m.repeat(num_curves))
        with torch.no_grad():
            print(self.features.alpha(x, m))
            z_star, _, _, _ = self.encoder_z.forward(x.detach())
            alpha_it = self.features.alpha_iterator(
                max_num_dimensions=2,
                num_elements_per_dim=num_elements_per_dim)

            # original
            style_vector = self.features.forward(x, m)
            logdet = cuda_variable(torch.zeros(batch_size))
            z, _, _, _ = self.z_star_to_z.forward(z_star=z_star,
                                                  logdet=logdet,
                                                  style_vector=style_vector)
            for curve_index, coords in enumerate(z):
                original_line = torch.cat(
                    [
                        coords,
                        cuda_variable(torch.Tensor([curve_index])),
                        cuda_variable(torch.Tensor([-1])),  # -1 for original
                    ],
                    0)
                z_trajectory.append(to_numpy(original_line[None, :]))

            # variation curves
            for alpha_index, alpha in enumerate(alpha_it):
                style_vector = self.features.style_vector_from_alpha(
                    alpha=alpha)

                logdet = cuda_variable(torch.zeros(batch_size))
                z, _, _, _ = self.z_star_to_z.forward(
                    z_star=z_star, logdet=logdet, style_vector=style_vector)
                x_pred, samples = self.decoder.forward(z)
                img_trajectory.append(x_pred)

                for curve_index, coords in enumerate(z):
                    line = torch.cat([
                        coords,
                        cuda_variable(torch.Tensor([curve_index])),
                        cuda_variable(torch.Tensor([alpha_index]))
                    ], 0)

                    z_trajectory.append(to_numpy(line[None, :]))

        img_trajectory = torch.cat(
            [t.view(num_curves, 1, 28, 28) for t in img_trajectory], 0)
        img_trajectory = torch.cat(
            [img_trajectory, x_original.view(1, 1, 28, 28)], 0)

        save_image(img_trajectory.cpu(),
                   'results/img_trajectories_same_x.png',
                   nrow=num_curves)

        z_trajectory = np.concatenate(z_trajectory, axis=0)
        np.savetxt('results/trajectories_same_x.csv',
                   z_trajectory,
                   delimiter=',')

        batch_size = num_curves
        _, generator_test, _ = self.dataset.data_loaders(batch_size)

        generator_test_it = iter(generator_test)
        for _ in range(random.randint(1, len(generator_test))):
            x, m = next(generator_test_it)
        trajectory = []
        img_trajectory = []
        x = cuda_variable(x)
        m = cuda_variable(m)

        with torch.no_grad():
            z_star, _, _, _ = self.encoder_z.forward(x.detach())
            alpha_it = self.features.alpha_iterator(
                max_num_dimensions=2,
                num_elements_per_dim=num_elements_per_dim)

            # original
            style_vector = self.features.forward(x, m)
            logdet = cuda_variable(torch.zeros(batch_size))
            z, _, _, _ = self.z_star_to_z.forward(z_star=z_star,
                                                  logdet=logdet,
                                                  style_vector=style_vector)
            for curve_index, coords in enumerate(z):
                original_line = torch.cat(
                    [
                        coords,
                        cuda_variable(torch.Tensor([curve_index])),
                        cuda_variable(torch.Tensor([-1])),  # -1 for original
                    ],
                    0)
                trajectory.append(to_numpy(original_line[None, :]))
            for alpha_index, alpha in enumerate(alpha_it):
                style_vector = self.features.style_vector_from_alpha(
                    alpha=alpha)

                logdet = cuda_variable(torch.zeros(batch_size))
                z, _, _, _ = self.z_star_to_z.forward(
                    z_star=z_star, logdet=logdet, style_vector=style_vector)
                x_pred, samples = self.decoder.forward(z)
                img_trajectory.append(x_pred)

                for curve_index, coords in enumerate(z):
                    line = torch.cat([
                        coords,
                        cuda_variable(torch.Tensor([curve_index])),
                        cuda_variable(torch.Tensor([alpha_index]))
                    ], 0)

                    trajectory.append(to_numpy(line[None, :]))

        img_trajectory = torch.cat(
            [t.view(num_curves, 1, 28, 28) for t in img_trajectory], 0)
        img_trajectory = torch.cat(
            [img_trajectory, x.view(batch_size, 1, 28, 28)], 0)
        save_image(img_trajectory.cpu(),
                   'results/img_trajectories.png',
                   nrow=num_curves)

        trajectory = np.concatenate(trajectory, axis=0)
        np.savetxt('results/trajectories.csv', trajectory, delimiter=',')
Example #15
File: varnet.py Project: zbxzc35/VarNet
 def preprocessing(self, *tensors):
     x, m = tensors
     return cuda_variable(x), cuda_variable(m)
Example #16
 def random_alpha(self, batch_size):
     return cuda_variable(torch.rand(batch_size, self.num_style_tokens))
Example #17
 def alpha_iterator(self,
                    max_num_dimensions=None,
                    num_elements_per_dim=10,
                    offset=0):
     alpha_gen = (cuda_variable(torch.zeros(1)) for _ in range(1))
     return alpha_gen
Example #18
 def style_vector_from_alpha(self, alpha):
     return cuda_variable(torch.zeros(
         (alpha.size(0), self.style_token_dim)))
Example #19
 def random_alpha(self, batch_size):
     probs = torch.rand(batch_size, self.num_style_tokens + 1)
     probs = probs / probs.sum(1, keepdim=True)
     return cuda_variable(probs)
Example #20
    def random_alpha(self, batch_size):
        r_a = torch.randint(low=0, high=self.num_values,
                            size=(batch_size, )).long()

        return cuda_variable(r_a)