Example #1
        # Select the supervision target for the predicted maps
        # (the snippet begins mid-branch; the leading `if` is not shown)
        elif b1:
            image_pr = torch.from_numpy(_normalize(hmaps)).float().cuda()
        elif b2:
            image_pr = torch.from_numpy(_normalize(pmaps)).float().cuda()

        # get real and fake inputs
        real = image_hr
        y_c, prs, fake = G(image_lr)

        # Patch-level targets for the discriminator (no gradients needed)
        real_label = torch.ones((real.size(0), 1, 8, 8)).float().cuda()
        fake_label = torch.zeros((real.size(0), 1, 8, 8)).float().cuda()

        # train discriminator
        D.zero_grad()
        loss_c1 = criterion_BCE(D(real), real_label)
        loss_c2 = criterion_BCE(D(fake.detach()), fake_label)
        loss_c = r_c * (loss_c1 + loss_c2)
        loss_c.backward()
        losses_D.append(loss_c.item())  # .item() detaches and avoids keeping the graph alive
        optimizer_D.step()

        # train generator
        G.zero_grad()
        loss_f1 = criterion_MSE(y_c, real)
        loss_f2 = a * criterion_MSE(fake, real)
        loss_f3 = b * criterion_MSE(prs, image_pr)
        loss_f = loss_f1 + loss_f2 + loss_f3
        loss_p = r_p * criterion_MSE(F(fake), F(real).detach())
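The snippet is cut off before the generator update completes. Below is a minimal sketch of the likely remaining steps, assuming the adversarial weight r_c is reused for the generator's adversarial term and that optimizer_G and losses_G exist (both names are hypothetical, not from the original source):

        # Hypothetical completion: add the adversarial term, then update G
        loss_adv = r_c * criterion_BCE(D(fake), real_label)  # G tries to fool D
        loss_g = loss_f + loss_p + loss_adv
        loss_g.backward()
        losses_G.append(loss_g.item())
        optimizer_G.step()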
Example #2
class GAN_CLS(object):
    def __init__(self, args, data_loader, SUPERVISED=True):
        """
        Arguments :
        ----------
        args : Arguments defined in Argument Parser
        data_loader = An instance of class DataLoader for loading our dataset in batches
        SUPERVISED :

        """

        self.data_loader = data_loader
        self.num_epochs = args.num_epochs
        self.batch_size = args.batch_size

        self.log_step = args.log_step
        self.sample_step = args.sample_step
        # `model_save_step` is read in train_model(); assumed to come from the parser
        self.model_save_step = args.model_save_step

        self.log_dir = args.log_dir
        self.checkpoint_dir = args.checkpoint_dir
        self.sample_dir = args.sample_dir
        self.final_model = args.final_model

        self.dataset = args.dataset
        self.model_name = args.model_name

        self.img_size = args.img_size
        self.z_dim = args.z_dim
        self.text_embed_dim = args.text_embed_dim
        self.text_reduced_dim = args.text_reduced_dim
        self.learning_rate = args.learning_rate
        self.beta1 = args.beta1
        self.beta2 = args.beta2
        self.l1_coeff = args.l1_coeff
        self.resume_epoch = args.resume_epoch
        self.SUPERVISED = SUPERVISED

        # Logger setting
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.INFO)
        self.formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')
        self.file_handler = logging.FileHandler(self.log_dir)
        self.file_handler.setFormatter(self.formatter)
        self.logger.addHandler(self.file_handler)

        self.build_model()

    def build_model(self):
        """ A function of defining following instances :

        -----  Generator
        -----  Discriminator
        -----  Optimizer for Generator
        -----  Optimizer for Discriminator
        -----  Defining Loss functions

        """

        # ---------------------------------------------------------------------
        #						1. Network Initialization
        # ---------------------------------------------------------------------
        self.gen = Generator(batch_size=self.batch_size,
                             img_size=self.img_size,
                             z_dim=self.z_dim,
                             text_embed_dim=self.text_embed_dim,
                             text_reduced_dim=self.text_reduced_dim)

        self.disc = Discriminator(batch_size=self.batch_size,
                                  img_size=self.img_size,
                                  text_embed_dim=self.text_embed_dim,
                                  text_reduced_dim=self.text_reduced_dim)

        self.gen_optim = optim.Adam(self.gen.parameters(),
                                    lr=self.learning_rate,
                                    betas=(self.beta1, self.beta2))

        self.disc_optim = optim.Adam(self.disc.parameters(),
                                     lr=self.learning_rate,
                                     betas=(self.beta1, self.beta2))

        self.cls_gan_optim = optim.Adam(itertools.chain(self.gen.parameters(),
                                                        self.disc.parameters()),
                                        lr=self.learning_rate,
                                        betas=(self.beta1, self.beta2))

        print('-------------  Generator Model Info  ---------------')
        self.print_network(self.gen, 'G')
        print('------------------------------------------------')

        print('-------------  Discriminator Model Info  ---------------')
        self.print_network(self.disc, 'D')
        print('------------------------------------------------')

        self.gen.cuda()
        self.disc.cuda()
        self.criterion = nn.BCELoss().cuda()
        # self.CE_loss = nn.CrossEntropyLoss().cuda()
        # self.MSE_loss = nn.MSELoss().cuda()
        self.gen.train()
        self.disc.train()

    def print_network(self, model, name):
        """ A function for printing total number of model parameters """
        num_params = 0
        for p in model.parameters():
            num_params += p.numel()

        print(model)
        print(name)
        print("Total number of parameters: {}".format(num_params))

    def load_checkpoints(self, resume_epoch):
        """Restore the trained generator and discriminator."""
        print('Loading the trained models from epoch {}...'.format(resume_epoch))
        G_path = os.path.join(self.checkpoint_dir, '{}-G.ckpt'.format(resume_epoch))
        D_path = os.path.join(self.checkpoint_dir, '{}-D.ckpt'.format(resume_epoch))
        self.gen.load_state_dict(torch.load(G_path, map_location=lambda storage, loc: storage))
        self.disc.load_state_dict(torch.load(D_path, map_location=lambda storage, loc: storage))

    def train_model(self):

        data_loader = self.data_loader

        start_epoch = 0
        if self.resume_epoch:
            start_epoch = self.resume_epoch
            self.load_checkpoints(self.resume_epoch)

        print ('---------------  Model Training Started  ---------------')
        start_time = time.time()

        for epoch in range(start_epoch, self.num_epochs):
            for idx, batch in enumerate(data_loader):
                true_imgs = batch['true_imgs']
                true_embed = batch['true_embed']
                false_imgs = batch['false_imgs']

                real_labels = torch.ones(true_imgs.size(0))
                fake_labels = torch.zeros(true_imgs.size(0))

                smooth_real_labels = torch.FloatTensor(Utils.smooth_label(real_labels.numpy(), -0.1))

                true_imgs = Variable(true_imgs.float()).cuda()
                true_embed = Variable(true_embed.float()).cuda()
                false_imgs = Variable(false_imgs.float()).cuda()

                real_labels = Variable(real_labels).cuda()
                smooth_real_labels = Variable(smooth_real_labels).cuda()
                fake_labels = Variable(fake_labels).cuda()

                # ---------------------------------------------------------------
                # 					  2. Training the generator
                # ---------------------------------------------------------------
                self.gen.zero_grad()
                z = Variable(torch.randn(true_imgs.size(0), self.z_dim)).cuda()
                fake_imgs = self.gen(true_embed, z)
                fake_out, fake_logit = self.disc(fake_imgs, true_embed)
                true_out, true_logit = self.disc(true_imgs, true_embed)

                gen_loss = (self.criterion(fake_out, real_labels) +
                            self.l1_coeff * nn.L1Loss()(fake_imgs, true_imgs))

                gen_loss.backward()
                self.gen_optim.step()

                # ---------------------------------------------------------------
                # 					3. Training the discriminator
                # ---------------------------------------------------------------
                self.disc.zero_grad()
                false_out, false_logit = self.disc(false_imgs, true_embed)
                # Re-score the fake batch on a detached copy: the graph behind
                # fake_out was already freed by gen_loss.backward() above.
                fake_out_d, _ = self.disc(fake_imgs.detach(), true_embed)
                disc_loss = (self.criterion(true_out, smooth_real_labels) +
                             self.criterion(fake_out_d, fake_labels) +
                             self.criterion(false_out, fake_labels))

                disc_loss.backward()
                self.disc_optim.step()

                # self.cls_gan_optim.step()

                # Logging
                loss = {}
                loss['G_loss'] = gen_loss.item()
                loss['D_loss'] = disc_loss.item()

                # ---------------------------------------------------------------
                # 					4. Logging INFO into log_dir
                # ---------------------------------------------------------------
                if (idx + 1) % self.log_step == 0:
                    end_time = time.time() - start_time
                    end_time = datetime.timedelta(seconds=end_time)
                    log = "Elapsed [{}], Epoch [{}/{}], Idx [{}]".format(end_time, epoch + 1,
                                                                         self.num_epochs, idx)
                    for net, loss_value in loss.items():
                        log += ", {}: {:.4f}".format(net, loss_value)
                    self.logger.info(log)
                    print(log)

                # ---------------------------------------------------------------
                # 					5. Saving generated images
                # ---------------------------------------------------------------
                if (idx + 1) % self.sample_step == 0:
                    # Stack real and generated images along the height dimension
                    concat_imgs = torch.cat((true_imgs, fake_imgs), 2)
                    save_path = os.path.join(self.sample_dir, '{}-images.jpg'.format(idx + 1))
                    concat_imgs = (concat_imgs + 1) / 2  # rescale from [-1, 1] to [0, 1]
                    # concat_imgs.clamp_(0, 1)
                    save_image(concat_imgs.data.cpu(), save_path, nrow=1, padding=0)
                    print('Saved real and fake images into {}...'.format(save_path))

                # ---------------------------------------------------------------
                # 				6. Saving the checkpoints & final model
                # ---------------------------------------------------------------
                if (idx + 1) % self.model_save_step == 0:
                    G_path = os.path.join(self.checkpoint_dir, '{}-G.ckpt'.format(idx + 1))
                    D_path = os.path.join(self.checkpoint_dir, '{}-D.ckpt'.format(idx + 1))
                    torch.save(self.gen.state_dict(), G_path)
                    torch.save(self.disc.state_dict(), D_path)
                    print('Saved model checkpoints into {}...'.format(self.checkpoint_dir))
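Utils.smooth_label is called in train_model but not shown. A minimal sketch consistent with the call site (one-sided label smoothing that shifts the all-ones real targets by the given offset, so with -0.1 they become 0.9):

import numpy as np

def smooth_label(labels, offset):
    """Sketch of the assumed helper: shift targets by `offset` for
    one-sided label smoothing (ones become 0.9 with offset=-0.1)."""
    return labels + offset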
Example #3
class GAN3DTrainer(object):
    def __init__(self,
                 logDir,
                 printEvery=1,
                 resume=False,
                 useTensorboard=True):
        super(GAN3DTrainer, self).__init__()

        self.logDir = logDir

        self.currentEpoch = 0
        self.totalBatches = 0

        self.trainStats = {'lossG': [], 'lossD': [], 'accG': [], 'accD': []}

        self.printEvery = printEvery

        self.G = Generator()
        self.D = Discriminator()

        self.device = torch.device('cpu')
        if torch.cuda.is_available():
            self.device = torch.device('cuda:0')

            self.G = self.G.to(self.device)
            self.D = self.D.to(self.device)

            # parallelize models on both devices, splitting input on batch dimension
            self.G = torch.nn.DataParallel(self.G, device_ids=[0, 1])
            self.D = torch.nn.DataParallel(self.D, device_ids=[0, 1])

        # optim params direct from paper
        self.optimG = torch.optim.Adam(self.G.parameters(),
                                       lr=0.0025,
                                       betas=(0.5, 0.999))

        self.optimD = torch.optim.Adam(self.D.parameters(),
                                       lr=0.00005,
                                       betas=(0.5, 0.999))

        if resume:
            self.load()

        self.useTensorboard = useTensorboard
        self.tensorGraphInitialized = False
        self.writer = None
        if useTensorboard:
            self.writer = SummaryWriter(
                os.path.join(self.logDir, 'tensorboard'))

    def train(self, trainData: torch.utils.data.DataLoader):
        numBatches = 0

        self.G.train()
        self.D.train()

        for i, sample in enumerate(tqdm(trainData)):
            data = sample['data']

            self.optimG.zero_grad()
            self.G.zero_grad()

            self.optimD.zero_grad()
            self.D.zero_grad()

            # embed the 62^3 sample into a 64^3 grid (one-voxel zero border)
            realVoxels = torch.zeros(data['62'].shape[0], 64, 64,
                                     64).to(self.device)
            realVoxels[:, 1:-1, 1:-1, 1:-1] = data['62'].to(self.device)

            # discriminator train
            z = torch.normal(torch.zeros(data['62'].shape[0], 200),
                             torch.ones(data['62'].shape[0], 200) * 0.33).to(
                                 self.device)

            fakeVoxels = self.G(z)
            fakeD = self.D(fakeVoxels)
            realD = self.D(realVoxels)

            lossD = -torch.mean(torch.log(realD) + torch.log(1. - fakeD))
            accD = ((realD >= .5).float().mean() +
                    (fakeD < .5).float().mean()) / 2.
            accG = (fakeD > .5).float().mean()

            # only train if Disc wrong enough :)
            if accD < .8:
                self.D.zero_grad()
                lossD.backward()
                self.optimD.step()

            # gen train
            z = torch.normal(torch.zeros(data['62'].shape[0], 200),
                             torch.ones(data['62'].shape[0], 200) * 0.33).to(
                                 self.device)

            fakeVoxels = self.G(z)
            fakeD = self.D(fakeVoxels)

            # https://arxiv.org/pdf/1706.05170.pdf (IV. Methods, A. Training the gen model)
            lossG = -torch.mean(torch.log(fakeD))

            self.D.zero_grad()
            self.G.zero_grad()
            lossG.backward()
            self.optimG.step()

            # log progress
            numBatches += 1
            if i % self.printEvery == 0:
                tqdm.write(
                    f'[TRAIN] Epoch {self.currentEpoch:03d}, Batch {i:03d}: '
                    f'gen acc: {accG.item():2.3f}, disc acc: {accD.item():2.3f}'
                )

                if self.useTensorboard:
                    self.writer.add_scalar('GenLoss/train', lossG,
                                           numBatches + self.totalBatches)
                    self.writer.add_scalar('DisLoss/train', lossD,
                                           numBatches + self.totalBatches)
                    self.writer.add_scalar('GenAcc/train', accG,
                                           numBatches + self.totalBatches)
                    self.writer.add_scalar('DisAcc/train', accD,
                                           numBatches + self.totalBatches)
                    self.writer.flush()

                    if not self.tensorGraphInitialized:
                        #TODO: why can't I push graph?
                        tempZ = torch.autograd.Variable(
                            torch.rand(data['62'].shape[0], 200, 1, 1,
                                       1)).cuda(1)
                        self.writer.add_graph(self.G.module, tempZ)
                        self.writer.flush()

                        self.writer.add_graph(self.D.module, fakeVoxels)
                        self.writer.flush()

                        self.tensorGraphInitialized = True

        self.currentEpoch += 1
        self.totalBatches += numBatches

    def save(self):
        logTable = {
            'epoch': self.currentEpoch,
            'totalBatches': self.totalBatches
        }

        torch.save(self.G.state_dict(),
                   os.path.join(self.logDir, 'generator.pth'))
        torch.save(self.D.state_dict(), os.path.join(self.logDir,
                                                     'discrim.pth'))
        torch.save(self.optimG.state_dict(),
                   os.path.join(self.logDir, 'optimG.pth'))
        torch.save(self.optimD.state_dict(),
                   os.path.join(self.logDir, 'optimD.pth'))

        with open(os.path.join(self.logDir, 'recent.log'), 'w') as f:
            f.write(json.dumps(logTable))

        pickle.dump(self.trainStats,
                    open(os.path.join(self.logDir, 'trainStats.pkl'), 'wb'))

        tqdm.write('======== SAVED RECENT MODEL ========')

    def load(self):
        self.G.load_state_dict(
            torch.load(os.path.join(self.logDir, 'generator.pth')))
        self.D.load_state_dict(
            torch.load(os.path.join(self.logDir, 'discrim.pth')))
        self.optimG.load_state_dict(
            torch.load(os.path.join(self.logDir, 'optimG.pth')))
        self.optimD.load_state_dict(
            torch.load(os.path.join(self.logDir, 'optimD.pth')))

        with open(os.path.join(self.logDir, 'recent.log'), 'r') as f:
            runData = json.load(f)

        self.trainStats = pickle.load(
            open(os.path.join(self.logDir, 'trainStats.pkl'), 'rb'))

        self.currentEpoch = runData['epoch']
        self.totalBatches = runData['totalBatches']
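A hypothetical driver for GAN3DTrainer; the dataset object and epoch count below are illustrative, not from the original source:

# Minimal usage sketch: train() runs one epoch, save() checkpoints
# the generator, discriminator, and both optimizers.
trainer = GAN3DTrainer(logDir='./runs/gan3d', printEvery=10, resume=False)
loader = torch.utils.data.DataLoader(voxel_dataset, batch_size=32,
                                     shuffle=True)  # voxel_dataset is assumed
for _ in range(100):
    trainer.train(loader)
    trainer.save()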
Example #4
def train(dataloader,
          num_epochs,
          net,
          run_settings,
          learning_rate=0.0002,
          optimizerD='Adam'):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Create the nets
    generator = Generator(net).to(device)
    discriminator = Discriminator(net).to(device)

    # Apply the weights_init function to randomly initialize all weights
    generator.apply(weights_init)
    discriminator.apply(weights_init)

    # Initialize BCELoss function
    criterion = nn.BCELoss()

    # Size of the latent vector (assumed to be 100; `nz` is not defined
    # anywhere in the original snippet)
    nz = 100

    # Create batch of latent vectors that we will use to visualize
    # the progression of the generator
    fixed_noise = torch.randn(64, nz, 1, 1, device=device)

    # Establish convention for real and fake labels during training
    real_label = 1.
    fake_label = 0.

    beta1 = 0.5

    # Setup Adam optimizers for both G and D
    if optimizerD == 'SGD':
        optimizerD = optim.SGD(discriminator.parameters(), lr=learning_rate)
    else:
        optimizerD = optim.Adam(discriminator.parameters(),
                                lr=learning_rate,
                                betas=(beta1, 0.999))
    optimizerG = optim.Adam(generator.parameters(),
                            lr=learning_rate,
                            betas=(beta1, 0.999))

    # Lists to keep track of progress
    img_list = []
    G_losses = []
    D_losses = []
    iters = 0

    print("Starting Training Loop...")
    for epoch in range(num_epochs):
        for i, data in enumerate(dataloader, 0):
            ## Train with all-real batch
            discriminator.zero_grad()
            # Format batch
            real_cpu = data[0].to(device)
            b_size = real_cpu.size(0)
            label = torch.full((b_size, ),
                               real_label,
                               dtype=torch.float,
                               device=device)
            # Forward pass real batch through D
            output = discriminator(real_cpu).view(-1)
            # Calculate loss on all-real batch
            errD_real = criterion(output, label)
            # Calculate gradients for D in backward pass
            errD_real.backward()
            D_x = output.mean().item()

            ## Train with all-fake batch
            # Generate batch of latent vectors
            noise = torch.randn(b_size, nz, 1, 1, device=device)
            # Generate fake image batch with G
            fake = generator(noise)
            label.fill_(fake_label)
            # Classify all fake batch with D
            output = discriminator(fake.detach()).view(-1)
            # Calculate D's loss on the all-fake batch
            errD_fake = criterion(output, label)
            # Calculate the gradients for this batch
            errD_fake.backward()
            D_G_z1 = output.mean().item()
            # Add the gradients from the all-real and all-fake batches
            errD = errD_real + errD_fake
            # Update D
            optimizerD.step()

            ############################
            # (2) Update G network: maximize log(D(G(z)))
            ###########################
            generator.zero_grad()
            label.fill_(real_label)  # fake labels are real for generator cost
            # Since we just updated D, perform another forward pass of all-fake batch through D
            output = discriminator(fake).view(-1)
            # Calculate G's loss based on this output
            errG = criterion(output, label)
            # Calculate gradients for G
            errG.backward()
            D_G_z2 = output.mean().item()
            # Update G
            optimizerG.step()

            # Output training stats
            if i % 3 == 0:
                print(
                    '[%d/%d][%d/%d]\t\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z)): %.4f / %.4f'
                    % (epoch + 1, num_epochs, i + 1, len(dataloader),
                       errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))

            # Save Losses for plotting later
            G_losses.append(errG.item())
            D_losses.append(errD.item())

            # Check how the generator is doing by saving its output on fixed_noise
            if (iters %
                (len(dataloader) * 50) == 0) or ((epoch == num_epochs - 1) and
                                                 (i == len(dataloader) - 1)):
                with torch.no_grad():
                    fake = generator(fixed_noise).detach().cpu()
                img_list.append(
                    vutils.make_grid(fake, padding=2, normalize=True))

            iters += 1

    print("finished")

    for i in range(len(img_list)):
        plt.imshow(np.transpose(img_list[i], (1, 2, 0)))
        plt.savefig('generated_images_' + str(i) + '.png')

    plt.imshow(np.transpose(img_list[-1], (1, 2, 0)))
    plt.savefig('generated_images_' + run_settings + '.png')

    plt.figure(figsize=(10, 5))
    plt.title("Generator and Discriminator Loss During Training")
    plt.plot(G_losses, label="G")
    plt.plot(D_losses, label="D")
    plt.xlabel("Iterations")
    plt.ylabel("Loss")
    plt.legend()
    plt.savefig('loss_graph_' + run_settings + '.png')
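weights_init is applied to both networks but not defined in this snippet. A sketch matching the standard DCGAN initialization it most likely follows (N(0, 0.02) for convolution weights, N(1, 0.02) and zero for batch-norm scale and bias):

def weights_init(m):
    # Standard DCGAN weight initialization (assumed; not shown in the snippet)
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)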
Example #5
def gan_augment(x, y, seed, n_samples=None):
    if n_samples is None:
        n_samples = len(x)

    lr = 3e-4
    num_ep = 300
    z_dim = 100
    model_path = "./gan_checkpoint_%d.pth" % seed

    device = "cuda" if torch.cuda.is_available() else "cpu"
    G = Generator(z_dim).to(device)
    D = Discriminator(z_dim).to(device)
    bce_loss = nn.BCELoss()
    G_optim = optim.Adam(G.parameters(), lr=lr * 3, betas=(0.5, 0.999))
    D_optim = optim.Adam(D.parameters(), lr=lr, betas=(0.5, 0.999))

    batch = 64
    train_x = torch.Tensor(x)
    train_labels = torch.LongTensor(y)

    if os.path.exists(model_path):
        print("load trained GAN...")
        state = torch.load(model_path)
        G.load_state_dict(state["G"])
    else:
        print("training a new GAN...")
        for epoch in range(num_ep):
            for _ in range(len(train_x) // batch):
                idx = np.random.choice(range(len(train_x)), batch)
                batch_x = train_x[idx].to(device)
                batch_labels = train_labels[idx].to(device)

                y_real = torch.ones(batch).to(device)
                y_fake = torch.zeros(batch).to(device)

                # train D with real images
                D.zero_grad()
                D_real_out = D(batch_x, batch_labels).squeeze()
                D_real_loss = bce_loss(D_real_out, y_real)

                # train D with fake images
                z_ = torch.randn((batch, z_dim)).view(-1, z_dim, 1,
                                                      1).to(device)
                fake_labels = torch.randint(0, 10, (batch, )).to(device)
                G_out = G(z_, fake_labels)

                D_fake_out = D(G_out, fake_labels).squeeze()
                D_fake_loss = bce_loss(D_fake_out, y_fake)
                D_loss = D_real_loss + D_fake_loss
                D_loss.backward()
                D_optim.step()

                # train G
                G.zero_grad()
                z_ = torch.randn((batch, z_dim)).view(-1, z_dim, 1,
                                                      1).to(device)
                fake_labels = torch.randint(0, 10, (batch, )).to(device)
                G_out = G(z_, fake_labels)
                D_out = D(G_out, fake_labels).squeeze()
                G_loss = bce_loss(D_out, y_real)
                G_loss.backward()
                G_optim.step()

            plot2img(G_out[:50].detach().cpu())  # detach: G_out still carries grad
            print("epoch: %d G_loss: %.2f D_loss: %.2f" %
                  (epoch, G_loss, D_loss))
        state = {"G": G.state_dict(), "D": D.state_dict()}
        torch.save(state, model_path)

    with torch.no_grad():
        z_ = torch.randn((n_samples, z_dim)).view(-1, z_dim, 1, 1).to(device)
        fake_labels = torch.randint(0, 10, (n_samples, )).to(device)
        G_samples = G(z_, fake_labels)
        samples = G_samples.cpu().numpy().reshape((-1, 28, 28, 1))
    return samples, fake_labels.cpu().numpy()
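A hypothetical call to gan_augment, assuming MNIST-shaped inputs (28x28x1) as implied by the final reshape; x_train and y_train are illustrative names:

# Usage sketch: double the training set with GAN-generated samples.
aug_x, aug_y = gan_augment(x_train, y_train, seed=0)
x_all = np.concatenate([x_train.reshape(-1, 28, 28, 1), aug_x], axis=0)
y_all = np.concatenate([y_train, aug_y], axis=0)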