Example #1
    def __init__(self, args):
        self.use_cuda = args.cuda and torch.cuda.is_available()
        self.max_epoch = args.max_epoch
        self.global_epoch = 0
        self.global_iter = 0

        self.z_dim = args.z_dim
        self.z_var = args.z_var
        self.z_sigma = math.sqrt(args.z_var)
        self._lambda = args.reg_weight
        self.lr = args.lr
        self.beta1 = args.beta1
        self.beta2 = args.beta2
        self.lr_schedules = {30: 2, 50: 5, 100: 10}  # epoch milestones for multistep_lr_decay

        if args.dataset.lower() == 'celeba':
            self.nc = 3
            self.decoder_dist = 'gaussian'
        else:
            raise NotImplementedError

        self.net = cuda(WAE(self.z_dim, self.nc), self.use_cuda)
        self.optim = optim.Adam(self.net.parameters(),
                                lr=self.lr,
                                betas=(self.beta1, self.beta2))

        self.gather = DataGather()
        self.viz_name = args.viz_name
        self.viz_port = args.viz_port
        self.viz_on = args.viz_on
        if self.viz_on:
            self.viz = visdom.Visdom(env=self.viz_name + '_lines',
                                     port=self.viz_port)
            self.win_recon = None
            self.win_mmd = None
            self.win_mu = None
            self.win_var = None

        self.ckpt_dir = Path(args.ckpt_dir).joinpath(args.viz_name)
        self.ckpt_dir.mkdir(parents=True, exist_ok=True)
        self.ckpt_name = args.ckpt_name
        if self.ckpt_name is not None:
            self.load_checkpoint(self.ckpt_name)

        self.save_output = args.save_output
        self.output_dir = Path(args.output_dir).joinpath(args.viz_name)
        self.output_dir.mkdir(parents=True, exist_ok=True)

        self.dset_dir = args.dset_dir
        self.dataset = args.dataset
        self.batch_size = args.batch_size
        self.data_loader = return_data(args)
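
Two helpers used above, `cuda` and `multistep_lr_decay` (which consumes `lr_schedules`), live elsewhere in the repository and are not shown on this page. A minimal sketch of plausible implementations, assuming the schedule dict maps an epoch milestone to a divisor of the initial learning rate:

def cuda(obj, uses_cuda):
    # Assumed helper: move a tensor or nn.Module to the GPU when requested.
    return obj.cuda() if uses_cuda else obj


def multistep_lr_decay(optimizer, epoch, schedules):
    # Assumed semantics: divide the initial lr by the divisor attached to the
    # largest milestone the epoch has reached, e.g. {30: 2, 50: 5, 100: 10}.
    for group in optimizer.param_groups:
        group.setdefault('initial_lr', group['lr'])  # remember the base lr once
        divisor = 1
        for milestone in sorted(schedules):
            if epoch >= milestone:
                divisor = schedules[milestone]
        group['lr'] = group['initial_lr'] / divisor
    return optimizer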
Example #2
    def sample_z(self, n_sample=None, dim=None, sigma=None, template=None):
        if n_sample is None:
            n_sample = self.batch_size
        if dim is None:
            dim = self.z_dim
        if sigma is None:
            sigma = self.z_sigma

        if template is not None:
            # draw noise with the same shape, dtype and device as `template`
            z = sigma * Variable(template.data.new(template.size()).normal_())
        else:
            z = sigma * torch.randn(n_sample, dim)
            z = Variable(cuda(z, self.use_cuda))

        return z
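
Since `sample_z` only scales standard normal noise by `sigma`, the returned samples follow N(0, sigma^2 I). A standalone sanity check of that identity (not part of the repository):

import torch

sigma, n_sample, dim = 2.0, 10000, 64
z = sigma * torch.randn(n_sample, dim)  # z ~ N(0, sigma^2 I)
print(z.shape)         # torch.Size([10000, 64])
print(z.var().item())  # close to sigma ** 2 == 4.0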
Example #3
    def save_reconstruction(self):
        import numpy as np

        self.net.eval()
        for item in self.data_loader:
            x = Variable(cuda(item, self.use_cuda))
            x_recon, z_tilde = self.net(x)
            # keep the first five originals and their reconstructions
            x_recon = x_recon.data[:5]
            x = x.data[:5]
            images = torch.stack([x, x_recon], dim=0).cpu()
            np.save('reconstruction.npy', images.numpy())
            break  # one batch is enough
        self.net.train()
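
The saved array stacks the first five originals with their reconstructions, giving shape (2, 5, C, H, W). A hypothetical follow-up snippet for loading and splitting it:

import numpy as np

images = np.load('reconstruction.npy')  # shape (2, 5, C, H, W)
originals, reconstructions = images[0], images[1]
print(originals.shape, reconstructions.shape)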
Example #4
    def train(self):
        self.net.train()

        iters_per_epoch = len(self.data_loader)
        max_iter = self.max_epoch * iters_per_epoch
        with tqdm(total=max_iter) as pbar:
            pbar.update(self.global_iter)
            out = False
            while not out:
                for x in self.data_loader:
                    pbar.update(1)
                    self.global_iter += 1
                    if self.global_iter % iters_per_epoch == 0:
                        self.global_epoch += 1
                    self.optim = multistep_lr_decay(self.optim,
                                                    self.global_epoch,
                                                    self.lr_schedules)

                    x = Variable(cuda(x, self.use_cuda))
                    x_recon, z_tilde = self.net(x)
                    # draw prior samples shaped like the encoded batch
                    z = self.sample_z(template=z_tilde, sigma=self.z_sigma)

                    # per-sample reconstruction error plus the MMD penalty
                    # that pushes the aggregate posterior toward the prior
                    recon_loss = F.mse_loss(
                        x_recon, x, size_average=False).div(self.batch_size)
                    mmd_loss = mmd(z_tilde, z, z_var=self.z_var)
                    total_loss = recon_loss + self._lambda * mmd_loss

                    self.optim.zero_grad()
                    total_loss.backward()
                    self.optim.step()

                    if self.global_iter % 1000 == 0:
                        self.gather.insert(
                            iter=self.global_iter,
                            mu=z.mean(0).data,
                            var=z.var(0).data,
                            recon_loss=recon_loss.data,
                            mmd_loss=mmd_loss.data,
                        )

                    if self.global_iter % 5000 == 0:
                        self.gather.insert(images=x.data)
                        self.gather.insert(images=x_recon.data)
                        self.viz_reconstruction()
                        self.viz_lines()
                        self.sample_x_from_z(n_sample=100)
                        self.gather.flush()
                        self.save_checkpoint('last')
                        pbar.write(
                            '[{}] total_loss:{:.3f} recon_loss:{:.3f} mmd_loss:{:.3f}'
                            .format(self.global_iter, total_loss.data[0],
                                    recon_loss.data[0], mmd_loss.data[0]))

                    if self.global_iter % 20000 == 0:
                        self.save_checkpoint(str(self.global_iter))

                    if self.global_iter >= max_iter:
                        out = True
                        break

            pbar.write("[Training Finished]")
Example #5
    def train(self):
        self.net.train()

        # real/fake targets for the latent-space discriminator
        ones = Variable(cuda(torch.ones(self.batch_size, 1), self.use_cuda))
        zeros = Variable(cuda(torch.zeros(self.batch_size, 1), self.use_cuda))

        iters_per_epoch = len(self.data_loader)
        max_iter = self.max_epoch * iters_per_epoch
        with tqdm(total=max_iter) as pbar:
            pbar.update(self.global_iter)
            out = False
            while not out:
                for x in self.data_loader:
                    pbar.update(1)
                    self.global_iter += 1
                    if self.global_iter % iters_per_epoch == 0:
                        self.global_epoch += 1
                    self.optim = multistep_lr_decay(self.optim,
                                                    self.global_epoch,
                                                    self.lr_schedules)

                    x = Variable(cuda(x, self.use_cuda))
                    x_recon, z_tilde = self.net(x)
                    z = self.sample_z(template=z_tilde, sigma=self.z_sigma)
                    log_p_z = log_density_igaussian(z, self.z_var).view(-1, 1)

                    # discriminator targets: prior samples are "real" (ones),
                    # encoder outputs are "fake" (zeros); the prior log-density
                    # is added to both logits
                    D_z = self.D(z)
                    D_z_tilde = self.D(z_tilde)
                    D_loss = F.binary_cross_entropy_with_logits(D_z + log_p_z, ones) + \
                             F.binary_cross_entropy_with_logits(D_z_tilde + log_p_z, zeros)
                    total_D_loss = self._lambda * D_loss

                    self.optim_D.zero_grad()
                    # retain the graph: D_z_tilde is reused in Q_loss below
                    total_D_loss.backward(retain_graph=True)
                    self.optim_D.step()

                    # autoencoder update: reconstruction plus the adversarial
                    # term that makes encodings look like prior samples
                    recon_loss = F.mse_loss(
                        x_recon, x, size_average=False).div(self.batch_size)
                    Q_loss = F.binary_cross_entropy_with_logits(
                        D_z_tilde + log_p_z, ones)
                    total_AE_loss = recon_loss + self._lambda * Q_loss

                    self.optim.zero_grad()
                    total_AE_loss.backward()
                    self.optim.step()

                    if self.global_iter % 10 == 0:
                        self.gather.insert(
                            iter=self.global_iter,
                            D_z=F.sigmoid(D_z).mean().detach().data,
                            D_z_tilde=F.sigmoid(
                                D_z_tilde).mean().detach().data,
                            mu=z.mean(0).data,
                            var=z.var(0).data,
                            recon_loss=recon_loss.data,
                            Q_loss=Q_loss.data,
                            D_loss=D_loss.data)

                    if self.global_iter % 50 == 0:
                        self.save_reconstruction()
                        if self.viz_on:
                            self.gather.insert(images=x.data)
                            self.gather.insert(images=x_recon.data)
                            self.viz_reconstruction()
                            self.viz_lines()
                            self.sample_x_from_z(n_sample=100)
                            self.gather.flush()
                            self.save_checkpoint('last')
                            pbar.write(
                                '[{}] recon_loss:{:.3f} Q_loss:{:.3f} D_loss:{:.3f}'
                                .format(self.global_iter, recon_loss.data[0],
                                        Q_loss.data[0], D_loss.data[0]))
                            pbar.write('D_z:{:.3f} D_z_tilde:{:.3f}'.format(
                                F.sigmoid(D_z).mean().detach().data[0],
                                F.sigmoid(D_z_tilde).mean().detach().data[0]))

                    if self.global_iter % 2000 == 0:
                        self.save_checkpoint(str(self.global_iter))

                    if self.global_iter >= max_iter:
                        out = True
                        break

            pbar.write("[Training Finished]")