Example 1
    def train_GE(self, x, l_recons=None, l_norm=None):
        """

        :param x:
        :param l_recons:
        :param l_norm:
        :return:
        """
        bs = x.size(0)

        real_labels = cuda(Variable(torch.ones(bs)))
        fake_labels = cuda(Variable(torch.zeros(bs)))

        # Encode the real images and score the joint pair (x, E(x)).
        z_e = self.E(x)
        real_logit = self.D(x, z_e)

        # Sample from the prior, generate fakes and score the pair (G(z), z).
        z = self.sample_z(bs)
        x_fake = self.G(z)
        fake_logit = self.D(x_fake, z)

        # Per-sample L2 norm of the encoded latents.
        z_norm = torch.sqrt(torch.sum(z_e ** 2, dim=-1))

        # Adversarial G/E loss (labels are swapped relative to the D step).
        g_loss = self.compute_ge_loss(real_logit, fake_logit, fake_labels, real_labels)
        g_loss_out = g_loss.data.cpu().numpy()  # snapshot before the extra terms below

        # Every variant except plain BiGAN adds a reconstruction term.
        if self.bigan_type != cte.BiGAN:
            x_recons = self.G(z_e)
            x_recons_loss = self.get_x_recons_loss(x, x_recons)
            g_loss += l_recons * x_recons_loss.mean()

        # PMDGAN variants add a penalty on the encoded latents.
        if self.bigan_type in [cte.PMDGAN, cte.MLPMDGAN, cte.EPMDGAN]:
            z_e_loss = self.get_ze_loss(z_e)
            g_loss += l_norm * z_e_loss.mean()

        return g_loss_out, z_norm, g_loss
Example 2
    def train_D(self, x):
        """Discriminator loss on real pairs (x, E(x)) and fake pairs (G(z), z)."""
        bs = x.size(0)

        real_labels = cuda(Variable(torch.ones(bs)))
        fake_labels = cuda(Variable(torch.zeros(bs)))

        z_e = self.E(x)
        real_logit = self.D(x, z_e)

        z = self.sample_z(bs)

        x_fake = self.G(z)
        fake_logit = self.D(x_fake, z)
        d_loss = self.compute_d_loss(real_logit, real_labels, fake_logit, fake_labels)
        return d_loss
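These two methods are the two halves of an alternating adversarial update: `train_D` steps the discriminator, `train_GE` steps the generator/encoder. A minimal sketch of a driving loop, where `model`, `opt_D`, `opt_GE` and the loss weights are assumed names rather than code from the original source:

# Sketch only: optimizer names and weights are illustrative assumptions.
for x in loader:
    x = model.preprocess_batch(x)

    # Discriminator step.
    opt_D.zero_grad()
    d_loss = model.train_D(x)
    d_loss.backward()
    opt_D.step()

    # Generator/encoder step; the third return value is the loss tensor.
    opt_GE.zero_grad()
    _, _, g_loss = model.train_GE(x, l_recons=1.0, l_norm=1.0)
    g_loss.backward()
    opt_GE.step()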
Example 3
    def sample3(self, z):
        """Generate images from the given latent codes; numpy in, numpy out."""
        z = torch.tensor(z, dtype=torch.float)
        z = Variable(z).detach()  # no gradients needed for sampling
        z = cuda(z)
        x = self.G(z)
        return x.data.cpu().numpy()
Example 4
    def sample2(self, z, tensor=False):
        """Generate images from latent codes; returns a tensor if `tensor`, else numpy."""
        z = torch.tensor(z, dtype=torch.float)  # cast as in sample3; G expects float input
        z = Variable(z).detach()
        z = cuda(z)
        x = self.G(z)
        if tensor:
            return x
        return x.data.cpu().numpy()
Example 5
    def psnr(self, loader_sorted):
        """Per-image reconstruction PSNR over every batch of `loader_sorted`."""
        psnr_list = list()
        for i, x in enumerate(loader_sorted):
            x = cuda(x)
            z = self.E(x)
            x_recons = self.G(z)
            psnr = loss.PSNR_torch(x, x_recons, axis=(-3, -2, -1))
            psnr = psnr.data.cpu().numpy()
            psnr_list.extend(psnr)

        return np.array(psnr_list)
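`loss.PSNR_torch` itself is not shown in these examples. A minimal sketch of a per-image PSNR in the standard form 10 * log10(MAX^2 / MSE), assuming images scaled to [0, 1]; the real helper may normalise differently:

import torch

def psnr_torch(x, x_recons, axis=(-3, -2, -1), max_val=1.0, eps=1e-8):
    # Per-image mean squared error over channel, height and width.
    mse = ((x - x_recons) ** 2).mean(dim=axis)
    # Standard PSNR definition: 10 * log10(MAX^2 / MSE).
    return 10.0 * torch.log10(max_val ** 2 / (mse + eps))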
Example 6
    def get_z(self, x_input):
        """Encode a numpy array of images into latents, in chunks of 128."""
        n_imgs = x_input.shape[0]
        z_list = list()
        batch_size = 128
        n_batches = int(np.ceil(n_imgs / batch_size))
        for i in range(n_batches):
            x = torch.tensor(x_input[i * batch_size:(i + 1) * batch_size]).float()
            x = cuda(x)
            z = self.E(x)
            z_list.extend(z.data.cpu().numpy())

        return np.array(z_list)
Example 7
    def get_prediction(self, x_input, hard=True):
        """Predict in chunks of 128: argmax labels if `hard`, else softmax probabilities."""
        n_imgs = x_input.shape[0]
        out_list = list()
        batch_size = 128
        n_batches = int(np.ceil(n_imgs / batch_size))
        for i in range(n_batches):
            x = torch.tensor(x_input[i * batch_size:(i + 1) * batch_size]).float()
            x = lib.cuda(x)
            out = self.forward(x)
            out = torch.argmax(out, dim=1) if hard else F.softmax(out, dim=1)
            out_list.extend(out.data.cpu().numpy())

        return np.array(out_list)
Example 8
    def sample4(self, bs):
        """Draw `bs` prior samples and return the generated images with their latents."""
        # detach() here plays the role of the deprecated `volatile` flag:
        # no autograd graph is needed, so sampling uses minimal memory.
        n_bs = (bs + 63) // 64  # ceil(bs / 64): generate in chunks of 64
        x_list = list()
        z_list = list()
        for i in range(n_bs):
            z = Variable(torch.randn(64, self.z_dim)).detach()
            z = cuda(z)
            x = self.G(z)
            x_list.extend(x.cpu().data.numpy())
            z_list.extend(z.cpu().data.numpy())
        return np.array(x_list[:bs]), np.array(z_list[:bs])
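The comment in `sample4` dates from the pre-0.4 `volatile` API, which `torch.no_grad()` has since replaced. For reference, a sketch of the modern equivalent; the function name and signature are illustrative, not from the original source:

import numpy as np
import torch

def sample_images(G, z_dim, bs, chunk=64, device="cpu"):
    # torch.no_grad() replaced volatile=True: no autograd graph is
    # recorded, so sampling uses minimal memory.
    xs, zs = [], []
    with torch.no_grad():
        for _ in range((bs + chunk - 1) // chunk):  # ceil(bs / chunk) chunks
            z = torch.randn(chunk, z_dim, device=device)
            xs.append(G(z).cpu().numpy())
            zs.append(z.cpu().numpy())
    return np.concatenate(xs)[:bs], np.concatenate(zs)[:bs]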
Example 9
    def get_data_batch(self, data_curr):
        """Optionally replace the batch with the hardest-to-reconstruct images seen so far."""
        if not self.d_batch_on:
            return data_curr

        # First call for EPMDGAN: nothing cached yet, just remember this batch.
        if self.bigan_type in [cte.EPMDGAN] and self.data_batch is None:
            self.data_batch = cuda(data_curr)
            return data_curr

        data = torch.cat((cuda(data_curr), self.data_batch), 0)

        # Rank all candidate images by reconstruction PSNR, lowest (worst) first.
        self.model.eval()
        data_recons = self.model.G(self.model.E(data))
        psnr = loss_fn.PSNR_torch(data, data_recons, axis=(-3, -2, -1))
        idx_sorted = psnr.argsort(descending=False)
        self.model.train()

        # Cache either the raw current batch or the worst-reconstructed third.
        keep_size = self.batch_size // 3
        if np.random.uniform() > self.l_perc:
            self.data_batch = cuda(data_curr)
        else:
            self.data_batch = data[idx_sorted[:keep_size]]

        # Keep the images with the worst reconstruction.
        return data[idx_sorted[:self.batch_size]]
Example 10
    def sample_z(self, bs):
        """Draw `bs` latent vectors from the standard normal prior."""
        z = torch.randn((bs, self.z_dim))
        z = cuda(z)
        z = Variable(z)
        return z
Example 11
    def preprocess_batch(self, x):
        """Cast a raw batch to float and move it to the device via the `cuda` helper."""
        data = Variable(x).float()
        data = cuda(data)
        return data